kafka_msg_handler.py
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Kafka message handler."""
import itertools
import json
import logging
import os
import re
import shutil
import tempfile
import threading
import time
import traceback
from tarfile import ReadError
from tarfile import TarFile
import requests
from confluent_kafka import Consumer
from confluent_kafka import Producer
from confluent_kafka import TopicPartition
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db import InterfaceError
from django.db import OperationalError
from kombu.exceptions import OperationalError as RabbitOperationalError
from api.common import log_json
from kafka_utils.utils import is_kafka_connected
from masu.config import Config
from masu.database.report_manifest_db_accessor import ReportManifestDBAccessor
from masu.external import UNCOMPRESSED
from masu.external.accounts_accessor import AccountsAccessor
from masu.external.accounts_accessor import AccountsAccessorError
from masu.external.downloader.ocp.ocp_report_downloader import create_daily_archives
from masu.external.downloader.ocp.ocp_report_downloader import OCPReportDownloader
from masu.processor._tasks.process import _process_report_file
from masu.processor.report_processor import ReportProcessorDBError
from masu.processor.report_processor import ReportProcessorError
from masu.processor.tasks import OCP_QUEUE
from masu.processor.tasks import record_all_manifest_files
from masu.processor.tasks import record_report_status
from masu.processor.tasks import summarize_reports
from masu.prometheus_stats import KAFKA_CONNECTION_ERRORS_COUNTER
from masu.util.ocp import common as utils
LOG = logging.getLogger(__name__)
SUCCESS_CONFIRM_STATUS = "success"
FAILURE_CONFIRM_STATUS = "failure"
class KafkaMsgHandlerError(Exception):
"""Kafka msg handler error."""
def close_and_set_db_connection(): # pragma: no cover
"""Close the db connection and set to None."""
if connections[DEFAULT_DB_ALIAS].connection:
connections[DEFAULT_DB_ALIAS].connection.close()
connections[DEFAULT_DB_ALIAS].connection = None
def delivery_callback(err, msg):
"""Acknowledge message success or failure."""
if err is not None:
LOG.error(f"Failed to deliver message: {msg}: {err}")
else:
LOG.info("Validation message delivered.")
def create_manifest_entries(report_meta, request_id, context={}):
"""
Creates manifest database entries for report processing tracking.
Args:
report_meta (dict): Report context dictionary from extract_payload.
request_id (String): Identifier associated with the payload
context (Dict): Context for logging (account, etc)
Returns:
manifest_id (Integer): Manifest identifier of the created db entry.
"""
downloader = OCPReportDownloader(
report_meta.get("schema_name"),
report_meta.get("cluster_id"),
None,
provider_uuid=report_meta.get("provider_uuid"),
request_id=request_id,
account=context.get("account", "no_account"),
)
return downloader._prepare_db_manifest_record(report_meta)
def get_account_from_cluster_id(cluster_id, manifest_uuid, context={}):
"""
Returns the provider details for a given OCP cluster id.
Args:
cluster_id (String): Cluster UUID.
manifest_uuid (String): Identifier associated with the payload manifest
context (Dict): Context for logging (account, etc)
Returns:
(dict) - keys: value
authentication: String,
customer_name: String,
billing_source: String,
provider_type: String,
schema_name: String,
provider_uuid: String
"""
account = None
provider_uuid = utils.get_provider_uuid_from_cluster_id(cluster_id)
if provider_uuid:
msg = f"Found provider_uuid: {str(provider_uuid)} for cluster_id: {str(cluster_id)}"
LOG.info(log_json(manifest_uuid, msg, context))
if context:
context["provider_uuid"] = provider_uuid
account = get_account(provider_uuid, manifest_uuid, context)
return account
def download_payload(request_id, url, context={}):
"""
Download the payload from ingress to temporary location.
Args:
request_id (String): Identifier associated with the payload
url (String): URL path to payload in the Insights upload service.
context (Dict): Context for logging (account, etc)
Returns:
Tuple: temp_dir (String), temp_file (String), gzip_filename (String)
"""
# Create temporary directory for initial file staging and verification in the
# OpenShift PVC directory so that any failures can be triaged in the event
# the pod goes down.
os.makedirs(Config.PVC_DIR, exist_ok=True)
temp_dir = tempfile.mkdtemp(dir=Config.PVC_DIR)
# Download file from quarantine bucket as tar.gz
try:
download_response = requests.get(url)
download_response.raise_for_status()
except requests.exceptions.HTTPError as err:
shutil.rmtree(temp_dir)
msg = f"Unable to download file. Error: {str(err)}"
LOG.warning(log_json(request_id, msg))
raise KafkaMsgHandlerError(msg)
sanitized_request_id = re.sub("[^A-Za-z0-9]+", "", request_id)
gzip_filename = f"{sanitized_request_id}.tar.gz"
temp_file = f"{temp_dir}/{gzip_filename}"
try:
temp_file_hdl = open(temp_file, "wb")
temp_file_hdl.write(download_response.content)
temp_file_hdl.close()
except (OSError, IOError) as error:
shutil.rmtree(temp_dir)
msg = f"Unable to write file. Error: {str(error)}"
LOG.warning(log_json(request_id, msg, context))
raise KafkaMsgHandlerError(msg)
return (temp_dir, temp_file, gzip_filename)
def extract_payload_contents(request_id, out_dir, tarball_path, tarball, context={}):
"""
Extract the payload contents into a temporary location.
Args:
request_id (String): Identifier associated with the payload
out_dir (String): temporary directory to extract data to
tarball_path (String): the path to the payload file to extract
tarball (String): the payload file to extract
context (Dict): Context for logging (account, etc)
Returns:
([String]): list of paths to the manifest file within the extracted archive
"""
# Extract tarball into temp directory
if not os.path.isfile(tarball_path):
msg = f"Unable to find tar file {tarball_path}."
LOG.warning(log_json(request_id, msg, context))
raise KafkaMsgHandlerError("Extraction failure, file not found.")
try:
mytar = TarFile.open(tarball_path, mode="r:gz")
mytar.extractall(path=out_dir)
files = mytar.getnames()
manifest_path = [manifest for manifest in files if "manifest.json" in manifest]
except (ReadError, EOFError, OSError) as error:
msg = f"Unable to untar file {tarball_path}. Reason: {str(error)}"
LOG.warning(log_json(request_id, msg, context))
shutil.rmtree(out_dir)
raise KafkaMsgHandlerError("Extraction failure.")
if not manifest_path:
msg = "No manifest found in payload."
LOG.warning(log_json(request_id, msg, context))
raise KafkaMsgHandlerError("No manifest found in payload.")
return manifest_path
def construct_parquet_reports(request_id, context, report_meta, payload_destination_path, report_file):
"""Build, upload and convert parquet reports."""
daily_parquet_files = create_daily_archives(
request_id,
report_meta["account"],
report_meta["provider_uuid"],
report_file,
payload_destination_path,
report_meta["manifest_id"],
report_meta["date"],
context,
)
return daily_parquet_files
# pylint: disable=too-many-locals
def extract_payload(url, request_id, context={}): # noqa: C901
"""
Extract OCP usage report payload into local directory structure.
Payload is expected to be a .tar.gz file that contains:
1. manifest.json - dictionary containing usage report details needed
for report processing.
Dictionary Contains:
files - names of .csv usage reports for the manifest
date - DateTime that the payload was created
uuid - uuid for payload
cluster_id - OCP cluster ID.
2. *.csv - Actual usage report for the cluster.
Format is: <uuid>_report_name.csv
On successful completion the report and manifest will be in a directory
structure that the OCPReportDownloader is expecting.
Ex: /var/tmp/insights_local/my-ocp-cluster-1/20181001-20181101
Once the files are extracted:
1. Provider account is retrieved for the cluster id. If no account is found we return.
2. Manifest database record is created which will establish the assembly_id and number of files
3. Report stats database record is created and is used as a filter to determine if the file
has already been processed.
4. All report files that have not been processed will have the local path to that report file
added to the report_meta context dictionary for that file.
5. Report file context dictionaries that require processing are added to a list which will be
passed to the report processor. All context from report_meta is used by the processor.
Args:
url (String): URL path to payload in the Insights upload service.
request_id (String): Identifier associated with the payload
context (Dict): Context for logging (account, etc)
Returns:
[dict]: keys: value
files: [String],
date: DateTime,
cluster_id: String
manifest_path: String,
provider_uuid: String,
provider_type: String
schema_name: String
manifest_id: Integer
current_file: String
"""
temp_dir, temp_file_path, temp_file = download_payload(request_id, url, context)
manifest_path = extract_payload_contents(request_id, temp_dir, temp_file_path, temp_file, context)
# Open manifest.json file and build the payload dictionary.
full_manifest_path = f"{temp_dir}/{manifest_path[0]}"
report_meta = utils.get_report_details(os.path.dirname(full_manifest_path))
# Filter and get account from payload's cluster-id
cluster_id = report_meta.get("cluster_id")
manifest_uuid = report_meta.get("uuid", request_id)
LOG.info(
log_json(
request_id,
f"Payload with the request id {request_id} from cluster {cluster_id}"
+ f" is part of the report with manifest id {manifest_uuid}",
)
)
if context:
context["cluster_id"] = cluster_id
account = get_account_from_cluster_id(cluster_id, manifest_uuid, context)
if not account:
msg = f"Received unexpected OCP report from {cluster_id}"
LOG.warning(log_json(manifest_uuid, msg, context))
shutil.rmtree(temp_dir)
return None, manifest_uuid
schema_name = account.get("schema_name")
provider_type = account.get("provider_type")
context["account"] = schema_name[4:]
context["provider_type"] = provider_type
report_meta["provider_uuid"] = account.get("provider_uuid")
report_meta["provider_type"] = provider_type
report_meta["schema_name"] = schema_name
report_meta["account"] = schema_name[4:]
report_meta["request_id"] = request_id
report_meta["tracing_id"] = manifest_uuid
# Create directory tree for report.
usage_month = utils.month_date_range(report_meta.get("date"))
destination_dir = f"{Config.INSIGHTS_LOCAL_REPORT_DIR}/{report_meta.get('cluster_id')}/{usage_month}"
os.makedirs(destination_dir, exist_ok=True)
# Copy manifest
manifest_destination_path = f"{destination_dir}/{os.path.basename(report_meta.get('manifest_path'))}"
shutil.copy(report_meta.get("manifest_path"), manifest_destination_path)
# Save Manifest
report_meta["manifest_id"] = create_manifest_entries(report_meta, request_id, context)
# Copy report payload
report_metas = []
for report_file in report_meta.get("files"):
current_meta = report_meta.copy()
subdirectory = os.path.dirname(full_manifest_path)
payload_source_path = f"{subdirectory}/{report_file}"
payload_destination_path = f"{destination_dir}/{report_file}"
try:
shutil.copy(payload_source_path, payload_destination_path)
current_meta["current_file"] = payload_destination_path
record_all_manifest_files(report_meta["manifest_id"], report_meta.get("files"), manifest_uuid)
if not record_report_status(report_meta["manifest_id"], report_file, manifest_uuid, context):
msg = f"Successfully extracted OCP for {report_meta.get('cluster_id')}/{usage_month}"
LOG.info(log_json(manifest_uuid, msg, context))
construct_parquet_reports(request_id, context, report_meta, payload_destination_path, report_file)
report_metas.append(current_meta)
else:
# Report already processed
pass
except FileNotFoundError:
msg = f"File {str(report_file)} has not downloaded yet."
LOG.debug(log_json(manifest_uuid, msg, context))
# Remove temporary directory and files
shutil.rmtree(temp_dir)
return report_metas, manifest_uuid
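# Hedged illustration (values invented; only the keys come from the docstring
# above): the manifest.json expected inside the payload tarball might look like:
#
#   {
#       "uuid": "f0029174-c191-4d99-b381-1e8355f00762",
#       "cluster_id": "my-ocp-cluster-1",
#       "date": "2021-06-01 00:00:00",
#       "files": ["f0029174-c191-4d99-b381-1e8355f00762_report.csv"]
#   }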
@KAFKA_CONNECTION_ERRORS_COUNTER.count_exceptions()
def send_confirmation(request_id, status): # pragma: no cover
"""
Send kafka validation message to Insights Upload service.
When a new file lands for topic 'hccm' we must validate it
so that it will be made permanently available to other
apps listening on the 'platform.upload.available' topic.
Args:
request_id (String): Request ID for file being confirmed.
status (String): Either 'success' or 'failure'
Returns:
None
"""
producer = get_producer()
validation = {"request_id": request_id, "validation": status}
msg = bytes(json.dumps(validation), "utf-8")
producer.produce(Config.VALIDATION_TOPIC, value=msg, callback=delivery_callback)
# Wait up to 1 second for events. Callbacks will be invoked during
# this method call if the message is acknowledged.
# `flush` makes this process synchronous compared to async with `poll`
producer.flush(1)
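# Hedged illustration (request id invented): the validation payload produced by
# send_confirmation() is a small JSON document, e.g.
#
#   {"request_id": "3c5f8d1a9b", "validation": "success"}
#
# published to Config.VALIDATION_TOPIC so the upload service can make the file
# available to apps listening on the platform.upload.available topic.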
def handle_message(msg):
"""
Handle messages from message pending queue.
Handles messages with topics: 'platform.upload.hccm',
and 'platform.upload.available'.
The OCP cost usage payload will land on topic hccm.
These messages will be extracted into the local report
directory structure. Once the file has been verified
(successfully extracted) we will report the status to
the Insights Upload Service so the file can be made available
to other apps on the service.
Messages on the available topic are messages that have
been verified by an app on the Insights upload service.
For now we are just logging the URL for demonstration purposes.
In the future if we want to maintain a URL to our report files
in the upload service we could look for hashes for files that
we have previously validated on the hccm topic.
Args:
msg - Upload Service message containing usage payload information.
Returns:
(String, [dict]) - String: Upload Service confirmation status
[dict]: keys: value
files: [String],
date: DateTime,
cluster_id: String
manifest_path: String,
provider_uuid: String,
provider_type: String
schema_name: String
manifest_id: Integer
current_file: String
"""
if msg.topic() == Config.HCCM_TOPIC:
value = json.loads(msg.value().decode("utf-8"))
request_id = value.get("request_id", "no_request_id")
account = value.get("account", "no_account")
context = {"account": account}
try:
msg = f"Extracting Payload for msg: {str(value)}"
LOG.info(log_json(request_id, msg, context))
report_metas, manifest_uuid = extract_payload(value["url"], request_id, context)
return SUCCESS_CONFIRM_STATUS, report_metas, manifest_uuid
except (OperationalError, InterfaceError) as error:
close_and_set_db_connection()
msg = f"Unable to extract payload, db closed. {type(error).__name__}: {error}"
LOG.error(log_json(request_id, msg, context))
raise KafkaMsgHandlerError(msg)
except Exception as error: # noqa
traceback.print_exc()
msg = f"Unable to extract payload. Error: {type(error).__name__}: {error}"
LOG.warning(log_json(request_id, msg, context))
return FAILURE_CONFIRM_STATUS, None, None
else:
LOG.error("Unexpected Message")
return None, None, None
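# Hedged illustration (values invented): handle_message() expects the hccm topic
# message value to be UTF-8 JSON carrying at least the payload URL, e.g.
#
#   {"request_id": "3c5f8d1a9b", "account": "10001",
#    "url": "https://ingress.example.com/quarantine/payload.tar.gz"}
#
# "url" and "request_id" are the keys actually read above; "account" defaults
# to "no_account" when absent.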
def get_account(provider_uuid, manifest_uuid, context={}):
"""
Retrieve a provider's account configuration needed for processing.
Args:
provider_uuid (String): Provider unique identifier.
manifest_uuid (String): Identifier associated with the payload manifest
context (Dict): Context for logging (account, etc)
Returns:
(dict) - keys: value
authentication: String,
customer_name: String,
billing_source: String,
provider_type: String,
schema_name: String,
provider_uuid: String
"""
all_accounts = []
try:
all_accounts = AccountsAccessor().get_accounts(provider_uuid)
except AccountsAccessorError as error:
msg = f"Unable to get accounts. Error: {str(error)}"
LOG.warning(log_json(manifest_uuid, msg, context))
return None
return all_accounts.pop() if all_accounts else None
def summarize_manifest(report_meta, manifest_uuid):
"""
Kick off manifest summary when all report files have completed line item processing.
Args:
manifest_uuid (string) - The id associated with the payload manifest
report_meta (Dict) - keys: value
schema_name: String,
manifest_id: Integer,
provider_uuid: String,
provider_type: String,
Returns:
Celery Async UUID.
"""
async_id = None
schema_name = report_meta.get("schema_name")
manifest_id = report_meta.get("manifest_id")
provider_uuid = report_meta.get("provider_uuid")
provider_type = report_meta.get("provider_type")
start_date = report_meta.get("start")
end_date = report_meta.get("end")
context = {"account": report_meta.get("schema_name"), "provider_uuid": provider_uuid}
with ReportManifestDBAccessor() as manifest_accessor:
if manifest_accessor.manifest_ready_for_summary(manifest_id):
new_report_meta = {
"schema_name": schema_name,
"provider_type": provider_type,
"provider_uuid": provider_uuid,
"manifest_id": manifest_id,
}
if start_date and end_date:
if "0001-01-01 00:00:00+00:00" in [str(start_date), str(end_date)]:
cr_status = report_meta.get("cr_status", {})
context["cluster-id"] = cr_status.get("clusterID", "no-cluster-id")
data_collection_message = cr_status.get("reports", {}).get("data_collection_message", "")
if data_collection_message:
# remove potentially sensitive info from the error message
msg = re.sub("{[^}]+}", "{***}", data_collection_message)
cr_status["reports"]["data_collection_message"] = msg
# The full CR status is logged below, but we should limit our alert to just the query.
# We can check the full manifest to get the full error.
LOG.error(log_json(manifest_uuid, f"data collection error [operator]: {msg}", context))
LOG.info(
log_json(manifest_uuid, f"CR Status for invalid manifest: {json.dumps(cr_status)}", context)
)
return # an invalid payload will fail to summarize, so return before we try
LOG.info(
log_json(
manifest_uuid,
f"Summarizing OCP reports from {str(start_date)}-{str(end_date)} for provider: {provider_uuid}",
context,
)
)
new_report_meta["start"] = start_date
new_report_meta["end"] = end_date
new_report_meta["manifest_uuid"] = manifest_uuid
async_id = summarize_reports.s([new_report_meta], OCP_QUEUE).apply_async(queue=OCP_QUEUE)
return async_id
def process_report(request_id, report):
"""
Process line item report.
Returns True when line item processing is complete. This is important because
the listen_for_messages -> process_messages path must have a positive acknowledgement
that line item processing is complete before committing.
If the service goes down in the middle of processing (SIGTERM) we do not want a
stray kafka commit to prematurely commit the message before processing is
complete.
Args:
request_id (Str): The request id
report (Dict) - keys: value
request_id: String,
account: String,
schema_name: String,
manifest_id: Integer,
provider_uuid: String,
provider_type: String,
current_file: String,
date: DateTime
Returns:
True if line item report processing is complete.
"""
schema_name = report.get("schema_name")
manifest_id = report.get("manifest_id")
provider_uuid = str(report.get("provider_uuid"))
provider_type = report.get("provider_type")
date = report.get("date")
# The create_table flag is used by the ParquetReportProcessor
# to create a Hive/Trino table.
report_dict = {
"file": report.get("current_file"),
"compression": UNCOMPRESSED,
"manifest_id": manifest_id,
"provider_uuid": provider_uuid,
"request_id": request_id,
"tracing_id": report.get("tracing_id"),
"provider_type": "OCP",
"start_date": date,
"create_table": True,
}
try:
return _process_report_file(schema_name, provider_type, report_dict)
except NotImplementedError as err:
LOG.info(f"NotImplementedError: {str(err)}")
return True
def report_metas_complete(report_metas):
"""
Verify if all reports from the ingress payload have been processed.
In process_messages, a dictionary value "process_complete" is added to the
report metadata dictionary for a report file. This must be True for it to be
considered processed.
Args:
report_metas (list) - List of report metadata dictionaries needed for line item
processing.
Returns:
True if all report files for the payload have completed line item processing.
"""
return all(report_meta.get("process_complete") for report_meta in report_metas)
def process_messages(msg):
"""
Process messages and send validation status.
Processing involves:
1. Downloading, verifying, extracting, and preparing report files for processing.
2. Line item processing each report file in the payload (downloaded from step 1).
3. Check if all reports have been processed for the manifest and if so, kick off
the celery worker task to summarize.
4. Send payload validation status to ingress service.
Args:
msg (ConsumerRecord) - Message from kafka hccm topic.
Returns:
None
"""
process_complete = False
status, report_metas, manifest_uuid = handle_message(msg)
value = json.loads(msg.value().decode("utf-8"))
request_id = value.get("request_id", "no_request_id")
tracing_id = manifest_uuid or request_id
if report_metas:
for report_meta in report_metas:
report_meta["process_complete"] = process_report(request_id, report_meta)
LOG.info(log_json(tracing_id, f"Processing: {report_meta.get('current_file')} complete."))
process_complete = report_metas_complete(report_metas)
summary_task_id = summarize_manifest(report_meta, tracing_id)
if summary_task_id:
LOG.info(log_json(tracing_id, f"Summarization celery uuid: {summary_task_id}"))
if status:
if report_metas:
file_list = [meta.get("current_file") for meta in report_metas]
files_string = ",".join(map(str, file_list))
LOG.info(log_json(tracing_id, f"Sending Ingress Service confirmation for: {files_string}"))
else:
LOG.info(log_json(tracing_id, f"Sending Ingress Service confirmation for: {value}"))
send_confirmation(value["request_id"], status)
return process_complete
def get_consumer(): # pragma: no cover
"""Create a Kafka consumer."""
consumer = Consumer(
{
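# Offsets are committed manually in listen_for_messages() after a payload has
# been processed, so enable.auto.commit is left False below.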
"bootstrap.servers": Config.INSIGHTS_KAFKA_ADDRESS,
"group.id": "hccm-group",
"queued.max.messages.kbytes": 1024,
"enable.auto.commit": False,
"max.poll.interval.ms": 1080000, # 18 minutes
},
logger=LOG,
)
consumer.subscribe([Config.HCCM_TOPIC])
return consumer
def get_producer(): # pragma: no cover
"""Create a Kafka producer."""
producer = Producer({"bootstrap.servers": Config.INSIGHTS_KAFKA_ADDRESS, "message.timeout.ms": 1000})
return producer
def listen_for_messages_loop():
"""Wrap listen_for_messages in while true."""
consumer = get_consumer()
LOG.info("Consumer is listening for messages...")
for _ in itertools.count(): # equivalent to while True, but mockable
msg = consumer.poll(timeout=1.0)
if msg is None:
continue
if msg.error():
KAFKA_CONNECTION_ERRORS_COUNTER.inc()
LOG.error(f"[listen_for_messages_loop] consumer.poll message: {msg}. Error: {msg.error()}")
continue
listen_for_messages(msg, consumer)
def rewind_consumer_to_retry(consumer, topic_partition):
"""Helper method to log and rewind kafka consumer for retry."""
LOG.info(f"Seeking back to offset: {topic_partition.offset}, partition: {topic_partition.partition}")
consumer.seek(topic_partition)
time.sleep(Config.RETRY_SECONDS)
def listen_for_messages(msg, consumer):
"""
Listen for messages on the hccm topic.
Once a message from this topic arrives, we extract the
payload and line item process the report files.
Once all files from the manifest are complete a celery job is
dispatched to the worker to complete summary processing for the manifest.
Several exceptions can occur while listening for messages:
Database Errors - Re-processing attempts will be made until successful.
Internal Errors - Re-processing attempts will be made until successful.
Report Processing Errors - Kafka message will be committed with an error.
Errors of this type would require a report processor
fix and we do not want to block the message queue.
Upon successful processing the kafka message is manually committed. Manual
commits are used so we can use the message queue to store unprocessed messages
to make the service more tolerant of SIGTERM events.
Args:
msg (Message): message from the hccm topic to be processed.
consumer (Consumer): kafka consumer for HCCM ingress topic.
Returns:
None
"""
offset = msg.offset()
partition = msg.partition()
topic_partition = TopicPartition(topic=Config.HCCM_TOPIC, partition=partition, offset=offset)
try:
LOG.info(f"Processing message offset: {offset} partition: {partition}")
process_messages(msg)
LOG.debug(f"COMMITTING: message offset: {offset} partition: {partition}")
consumer.commit()
except (InterfaceError, OperationalError, ReportProcessorDBError) as error:
close_and_set_db_connection()
LOG.error(f"[listen_for_messages] Database error. Error: {type(error).__name__}: {error}. Retrying...")
rewind_consumer_to_retry(consumer, topic_partition)
except (KafkaMsgHandlerError, RabbitOperationalError) as error:
LOG.error(f"[listen_for_messages] Internal error. {type(error).__name__}: {error}. Retrying...")
rewind_consumer_to_retry(consumer, topic_partition)
except ReportProcessorError as error:
LOG.error(f"[listen_for_messages] Report processing error: {str(error)}")
LOG.debug(f"COMMITTING: message offset: {offset} partition: {partition}")
consumer.commit()
except Exception as error:
LOG.error(f"[listen_for_messages] UNKNOWN error encountered: {type(error).__name__}: {error}", exc_info=True)
def koku_listener_thread(): # pragma: no cover
"""
Configure listener thread.
Returns:
None
"""
if is_kafka_connected(Config.INSIGHTS_KAFKA_HOST, Config.INSIGHTS_KAFKA_PORT): # Check that Kafka is running
LOG.info("Kafka is running.")
try:
listen_for_messages_loop()
except KeyboardInterrupt:
exit(0)
def initialize_kafka_handler(): # pragma: no cover
"""
Start Listener thread.
Args:
None
Returns:
None
"""
if Config.KAFKA_CONNECT:
event_loop_thread = threading.Thread(target=koku_listener_thread)
event_loop_thread.daemon = True
event_loop_thread.start()
event_loop_thread.join()
DataExtractor.py
#!python
# Extract the useful data from game files (json)
# Append the useful data to a csv file
import pickle
import os
import queue
import sys
from collections import OrderedDict
import multiprocessing
from multiprocessing.managers import BaseManager, NamespaceProxy
import time
import Modes
import pandas as pd
from collections import Counter
CHUNK_SIZE = 100
def extracted_writer(extracted_file, q, stop):
with open(extracted_file, 'a+') as f:
while not stop.is_set():
try:
game_path = q.get(timeout=1)
except queue.Empty:
continue
f.write(game_path)
f.write('\n')
print('Closing writer', file=sys.stderr)
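# Hedged usage sketch (mirrors what run() sets up below; the file name is
# illustrative): extracted_writer is meant to run in its own process and drain a
# shared queue of finished game paths:
#
#   manager = multiprocessing.Manager()
#   q, stop = manager.Queue(), manager.Event()
#   writer = multiprocessing.Process(target=extracted_writer,
#                                    args=('extracted.txt', q, stop))
#   writer.start()
#   # ... workers call q.put(game_path) as they finish each file ...
#   stop.set()
#   writer.join()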
class Extractor:
def __init__(self, mode, extracted_files, current_index, rot_length, writing_q):
self.mode = mode
self.rot_length = rot_length
self.writing_q = writing_q
self.current_index = current_index
if len(extracted_files) >= self.current_index > 0: # the file already exists
self.csv_file = os.path.join(mode.EXTRACTED_DIR, extracted_files[self.current_index - 1])
self.csv_index = len(pd.read_csv(self.csv_file, skiprows=1))
print(self.csv_file, 'lines', self.csv_index, file=sys.stderr)
else:
self.csv_file = None
self.csv_index = mode.DATA_LINES
class ExManager(BaseManager):
pass
class ExProxy(NamespaceProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__', 'b')
ExManager.register('Extractor', Extractor, ExProxy)
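# Registering Extractor through a NamespaceProxy-based proxy is intended to keep
# the instances created via ex_manager.Extractor(...) in run() inside the manager
# process; workers running analyze_game() read and update shared state such as
# csv_file, csv_index and current_index through the proxy rather than a
# per-process copy.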
def run(mode, cpu):
extracted_file = mode.EXTRACTED_FILE
if os.path.isfile(extracted_file):
with open(extracted_file, 'r') as f:
extracted_list = [x.strip() for x in f.readlines()]
else:
extracted_list = []
gamePaths = []
for patch in mode.learning_patches:
for region in mode.REGIONS:
if os.path.isdir(os.path.join(mode.DATABASE, 'patches', patch, region)):
gamePaths.extend(
[os.path.join(mode.DATABASE, 'patches', patch, region, f) for f in
os.listdir(os.path.join(mode.DATABASE, 'patches', patch, region))])
print('%d game files found' % len(gamePaths), file=sys.stderr)
gamePaths = list(set(gamePaths) - set(extracted_list))
print('%d new games to extract' % len(gamePaths), file=sys.stderr)
if not os.path.isdir(mode.EXTRACTED_DIR):
os.makedirs(mode.EXTRACTED_DIR)
extracted_files = [f for f in os.listdir(mode.EXTRACTED_DIR)]
l = list(map(lambda x: int(x.replace('data_', '').replace('.csv', '')), extracted_files))
l = sorted(range(len(l)), key=lambda k: l[k])
extracted_files = [extracted_files[k] for k in l]
# multiprocessing
manager = multiprocessing.Manager()
writing_q = manager.Queue()
stop = manager.Event()
writer = multiprocessing.Process(target=extracted_writer, args=(extracted_file, writing_q, stop))
writer.start()
ex_manager = ExManager()
ex_manager.start()
available_extractors = []
running_extractors = []
for i in range(cpu):
current_index = len(extracted_files) - i
# noinspection PyUnresolvedReferences
available_extractors.append(ex_manager.Extractor(mode, extracted_files, current_index, cpu, writing_q))
while gamePaths:
# we work with chunks in order to save time (no need to hand over the extractor for every single game)
chunk = gamePaths[:CHUNK_SIZE]
gamePaths = gamePaths[CHUNK_SIZE:]
print(len(gamePaths), 'left', file=sys.stderr)
while not available_extractors: # wait until an extractor is available
for p, ex in running_extractors:
if p.is_alive():
continue
available_extractors.append(ex)
running_extractors.remove((p, ex))
if not available_extractors: # wait a bit
time.sleep(0.001)
# start a new job
ex = available_extractors.pop()
p = multiprocessing.Process(target=analyze_game, args=(ex, chunk,))
running_extractors.append((p, ex))
p.start()
for p, ex in running_extractors:
p.join()
stop.set()
writer.join()
print('-- Extraction complete --')
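# analyze_game (run in a worker process): for each pickled game it rebuilds the
# draft as successive states (blank, bans, then picks in approximate draft
# order), infers each participant's lane/role, and appends the rows to the
# extractor's current CSV, rolling over to a new data_<n>.csv once
# mode.DATA_LINES rows have been written. Finished paths go back on writing_q.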
def analyze_game(ex, gamePaths):
for gamePath in gamePaths:
raw_data = OrderedDict([('s_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL] + [('p_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL])
raw_data['patch'] = []
raw_data['win'] = []
raw_data['file'] = []
print(ex.csv_file, gamePath)
game = pickle.load(open(gamePath, 'rb'))
bans = []
game_patch = '_'.join(game['gameVersion'].split('.')[:2])
if game['gameDuration'] < 300:
print(gamePath, 'FF afk', game['gameDuration'], file=sys.stderr)
ex.writing_q.put(gamePath)
continue
blueTeam = None
redTeam = None
for team in game['teams']:
if team['teamId'] == 100:
blueTeam = team
elif team['teamId'] == 200:
redTeam = team
else:
print(gamePath, 'Unrecognized team %d' % team['teamId'], file=sys.stderr)
break
for ban in team['bans']:
championId = ban['championId']
if championId not in bans:
bans.append(championId)
if not blueTeam or not redTeam:
print(gamePath, 'Teams are not recognized', file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# not sure what is written for voided games, so it's safer to check both
# if we get something other than true/false or false/true we just ignore the file
blueWin = blueTeam['win'] == 'Win'
redWin = redTeam['win'] == 'Win'
if not blueWin ^ redWin:
print(gamePath, 'No winner found', blueWin, redWin, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
participants = game['participants']
# Blank, everything is available
state = OrderedDict()
state['win'] = int(blueWin)
state['patch'] = game_patch
state['file'] = os.path.basename(gamePath)
state.update([('s_' + champ_name, 'A') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Status
state.update([('p_' + champ_name, 'N') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Position
for key, value in state.items():
raw_data[key].append(value)
# Bans
state = OrderedDict(state) # don't forget to create a clean copy
for championId in bans:
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'N' # None
break
for key, value in state.items():
raw_data[key].append(value)
# Smart lane-role
# The API doesn't precisely give player roles, so we have to deduce them
b_roles = OrderedDict()
r_roles = OrderedDict()
for i in range(0, 10):
p = participants[i]
lane = p['timeline']['lane']
if i < 5:
if lane == 'TOP':
b_roles[i] = 'T'
elif lane == 'JUNGLE':
b_roles[i] = 'J'
elif lane == 'MIDDLE':
b_roles[i] = 'M'
elif lane == 'BOTTOM':
b_roles[i] = 'C'
elif lane == 'NONE':
b_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
else:
if lane == 'TOP':
r_roles[i] = 'T'
elif lane == 'JUNGLE':
r_roles[i] = 'J'
elif lane == 'MIDDLE':
r_roles[i] = 'M'
elif lane == 'BOTTOM':
r_roles[i] = 'C'
elif lane == 'NONE':
r_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
# Fill missing role '?'
# target at this point is something like 'T', 'J', 'M', 'C', 'C'
b_toFillCount = Counter(b_roles.values())['?']
if b_toFillCount > 1:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif b_toFillCount == 1:
fill_index = list(b_roles.keys())[list(b_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(b_roles.values()))
if len(missing_roles) == 1:
# non-bot role
b_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
b_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
r_toFillCount = Counter(r_roles.values())['?']
if r_toFillCount > 1:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif r_toFillCount == 1:
fill_index = list(r_roles.keys())[list(r_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(r_roles.values()))
if len(missing_roles) == 1:
# non-bot role
r_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
r_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# need to find the support in both team
# a lane will appear twice, most likely 'C'
# the support will either be tagged as 'SUPPORT' or have a low cs count
b_doubleRole = Counter(b_roles.values()).most_common(1)[0][0]
b_doublei = [i for i, r in b_roles.items() if r == b_doubleRole]
if len(b_doublei) > 2:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[b_doublei[0]]['timeline']['role']:
b_roles[b_doublei[0]] = 'S'
elif 'SUPPORT' in participants[b_doublei[1]]['timeline']['role']:
b_roles[b_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[b_doublei[0]]['timeline']:
if participants[b_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[b_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
else:
if participants[b_doublei[0]]['stats']['totalMinionsKilled'] < participants[b_doublei[1]]['stats']['totalMinionsKilled']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
r_doubleRole = Counter(r_roles.values()).most_common(1)[0][0]
r_doublei = [i for i, r in r_roles.items() if r == r_doubleRole]
if len(r_doublei) > 2:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[r_doublei[0]]['timeline']['role']:
r_roles[r_doublei[0]] = 'S'
elif 'SUPPORT' in participants[r_doublei[1]]['timeline']['role']:
r_roles[r_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[r_doublei[0]]['timeline']:
if participants[r_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[r_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
else:
if participants[r_doublei[0]]['stats']['totalMinionsKilled'] < participants[r_doublei[1]]['stats']['totalMinionsKilled']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
roles = OrderedDict()
roles.update(b_roles)
roles.update(r_roles)
# Draft
DRAFT_ORDER = [0, 5, 6, 1, 2, 7, 8, 3, 4, 9] # This is not exact. This is end-of-draft order, not pick order: if some players
# trade, this order is wrong. Unfortunately there is no way to know the real pick order, so we just assume people don't trade
# often and that trading does not have a huge impact anyway.
for i in DRAFT_ORDER:
state = OrderedDict(state)
bluePick = i < 5
p = participants[i]
championId = p['championId']
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'B' if bluePick else 'R'
state['p_' + champ_name] = roles[i]
break
for key, value in state.items():
raw_data[key].append(value)
df = pd.DataFrame(raw_data, columns=ex.mode.COLUMNS)
if ex.csv_index + len(df) < ex.mode.DATA_LINES:
df.to_csv(ex.csv_file, mode='a', header=False, index=False)
ex.csv_index += len(df)
else: # split the data in two: finish prev file and start another
to_current = df.iloc[:ex.mode.DATA_LINES - ex.csv_index]
to_next = df.iloc[ex.mode.DATA_LINES - ex.csv_index:]
to_current.to_csv(ex.csv_file, mode='a', header=False, index=False)
# preparing new file
ex.current_index += ex.rot_length
current_file = 'data_' + str(ex.current_index) + '.csv'
ex.csv_file = os.path.join(ex.mode.EXTRACTED_DIR, current_file)
ex.csv_index = 0
to_next.to_csv(ex.csv_file, mode='a', header=True, index=False)
ex.csv_index += len(to_next)
# File fully explored
ex.writing_q.put(gamePath)
if __name__ == '__main__':
m = Modes.ABR_TJMCS_Mode(['9.1','9.2','9.3','9.4','9.5','9.6','9.7'])
run(m, max(multiprocessing.cpu_count() - 1, 1))
padding_fifo_queue_test.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class PaddingFIFOQueueTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
((), ()),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(3, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None, None),))
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
tf.PaddingFIFOQueue(10, tf.float32, None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), ((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
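# Note on the assertions above: for components with partially known shapes,
# dequeue_many pads every element up to the longest element in the dequeued
# batch (strings with b"", ints with 0), which is the padding behaviour this
# test exercises.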
def testHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(50, tf.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.PaddingFIFOQueue(100, tf.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.PaddingFIFOQueue(total_count, tf.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(1, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_a")
q_a_2 = tf.PaddingFIFOQueue(15, tf.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_b")
q_b_2 = tf.PaddingFIFOQueue(10, tf.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_c")
q_c_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_f")
q_f_2 = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), ((), ()), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.PaddingFIFOQueue(10, tf.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(10, tf.float32, ((),))
q2 = tf.PaddingFIFOQueue(15, tf.float32, ((),))
enq_q = tf.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.PaddingFIFOQueue(5, tf.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.PaddingFIFOQueue(5, tf.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64, tf.complex128]
shape = (32, 4, 128)
q = tf.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype == tf.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
tf.PaddingFIFOQueue(32, [tf.float32], [tf.TensorShape(None)])
if __name__ == "__main__":
tf.test.main()
|
test_connect_attempts.py
|
'''
Test Origin Server Connect Attempts
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import time
import logging
import socket
import struct
import select
import threading
import helpers
log = logging.getLogger(__name__)
def thread_die_on_connect(sock):
sock.listen(0)
# poll
select.select([sock], [], [])
# exit
sock.close()
def thread_delayed_accept_after_connect(sock):
'''
Thread to sleep a decreasing amount of time before requests
sleep times: 2 -> 1 -> 0
'''
sock.listen(0)
sleep_time = 2
num_requests = 0
# poll
while True:
select.select([sock], [], [])
time.sleep(sleep_time)
try:
connection, addr = sock.accept()
connection.send((
'HTTP/1.1 200 OK\r\n'
'Content-Length: {body_len}\r\n'
'Content-Type: text/html; charset=UTF-8\r\n'
'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests)
))
connection.close()
num_requests += 1
except Exception as e:
print 'connection died!', e
pass
if sleep_time > 0:
sleep_time -= 1
def thread_reset_after_accept(sock):
sock.listen(0)
first = True
num_requests = 0
while True:
connection, addr = sock.accept()
num_requests += 1
if first:
first = False
connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
connection.close()
else:
connection.send((
'HTTP/1.1 200 OK\r\n'
'Content-Length: {body_len}\r\n'
'Content-Type: text/html; charset=UTF-8\r\n'
'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests)
))
connection.close()
def thread_partial_response(sock):
sock.listen(0)
first = True
num_requests = 0
while True:
connection, addr = sock.accept()
num_requests += 1
if first:
connection.send('HTTP/1.1 500 Internal Server Error\r\n')
connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
connection.close()
first = False
else:
connection.send((
'HTTP/1.1 200 OK\r\n'
'Content-Length: {body_len}\r\n'
'Content-Type: text/html; charset=UTF-8\r\n'
'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests)
))
connection.close()
class TestOriginServerConnectAttempts(helpers.EnvironmentCase):
@classmethod
def setUpEnv(cls, env):
'''
This function is responsible for setting up the environment for this fixture
This includes everything pre-daemon start
'''
cls.sock_map = {}
def _add_sock(name):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
cls.sock_map[name] = sock.getsockname()[1]
cls.configs['remap.config'].add_line('map /{0}/ http://127.0.0.1:{1}/'.format(name, cls.sock_map[name]))
return sock
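        # e.g. a request for /bound/s gets remapped to http://127.0.0.1:<bound port>/s,
        # so each path prefix below targets one of the special origin sockets.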
# create a socket where we just bind
_add_sock('bound')
# create a socket where we bind + listen
sock = _add_sock('listen')
sock.listen(1)
# create a bunch of special socket servers
sock = _add_sock('die_on_connect')
t = threading.Thread(target=thread_die_on_connect, args=(sock,))
t.daemon = True
t.start()
sock = _add_sock('reset_after_accept')
t = threading.Thread(target=thread_reset_after_accept, args=(sock,))
t.daemon = True
t.start()
sock = _add_sock('delayed_accept_after_connect')
t = threading.Thread(target=thread_delayed_accept_after_connect, args=(sock,))
t.daemon = True
t.start()
sock = _add_sock('partial_response')
t = threading.Thread(target=thread_partial_response, args=(sock,))
t.daemon = True
t.start()
# only add server headers when there weren't any
cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2
        # enable re-connects, timeout of 1s, max retries of 3
cls.configs['records.config']['CONFIG']['proxy.config.http.connect_attempts_timeout'] = 1
cls.configs['records.config']['CONFIG']['proxy.config.http.connect_attempts_max_retries'] = 3
def test_bound_origin(self):
'''Verify that we get 502s from an origin which just did a bind'''
url = 'http://127.0.0.1:{0}/bound/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 502)
def test_listen_origin(self):
'''Verify that we get 502s from origins that bind + listen'''
url = 'http://127.0.0.1:{0}/listen/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 502)
url = 'http://127.0.0.1:{0}/listen/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 502)
def test_die_on_connect_origin(self):
'''Verify that we get 504s from origins that die_on_connect'''
url = 'http://127.0.0.1:{0}/die_on_connect/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 504)
def test_partial_response_origin(self):
'''
        Verify that the partial 500 response from a partial_response origin is passed through rather than retried.
        We want to bail out: since the origin already got the request, we can't
        guarantee that the request is re-entrant
'''
url = 'http://127.0.0.1:{0}/partial_response/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 500)
def test_reset_after_accept_origin(self):
        '''Verify that we get 502s from origins that reset_after_accept; once any bytes are sent to the origin we assume we cannot re-dispatch'''
url = 'http://127.0.0.1:{0}/reset_after_accept/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
self.assertEqual(ret.status_code, 502)
def test_delayed_accept_after_connect_origin(self):
'''Verify that we get 200s from origins that delayed_accept_after_connect'''
url = 'http://127.0.0.1:{0}/delayed_accept_after_connect/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
ret = requests.get(url)
# make sure it worked
self.assertEqual(ret.status_code, 200)
        # make sure it's not the first one (otherwise the test messed up somehow)
print ret.text
self.assertGreater(int(ret.text), 0)
|
runner.py
|
# From Kami: https://raw.github.com/Kami/parallel-django-and-twisted-test-runner/master/runner.py
import sys
import time
import logging
import multiprocessing
from multiprocessing import Process, Queue, Event
from Queue import Empty
from django.test.simple import DjangoTestSuiteRunner, DjangoTestRunner
from django.db import connections, DEFAULT_DB_ALIAS
from django.conf import settings
from django import VERSION
from .base import setup_test_db
# "auto" - one worker per Django application
# "cpu" - one worker per process core
WORKER_COUNT = getattr(settings, 'WORKER_COUNT', 'auto')
# amqplib spits out a lot of log messages which just add a lot of noise.
logging.getLogger().setLevel(logging.INFO)
null_file = open('/dev/null', 'w')
class BufferWritesDevice(object):
def __init__(self):
self._data = []
def write(self, string):
self._data.append(string)
def read(self):
return ''.join(self._data)
def flush(self, *args, **kwargs):
pass
def isatty(self):
return False
# Redirect stdout to /dev/null because we don't want to see all the repeated
# "database creation" logging statements from all the workers.
# All the test output is printed to stderr so this is not problematic.
# sys.stdout = null_file
class ParallelTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self._keyboard_interrupt_intercepted = False
self._worker_count = kwargs.get('worker_count', 'auto')
self._not_thread_safe = kwargs.get('not_thread_safe', None) or []
self._parent_timeout = kwargs.get('parent_timeout', 6)
self._worker_timeout = kwargs.get('worker_timeout', 5)
self._database_names = self._get_database_names()
def _get_database_names(self):
database_names = {}
for alias in connections:
connection = connections[alias]
database_name = connection.settings_dict['NAME']
database_names[alias] = database_name
return database_names
def run_tests(self, test_labels, **kwargs):
return self._run_tests(tests=test_labels)
def _run_tests(self, tests, **kwargs):
# tests = dict where the key is a test group name and the value are
# the tests to run
tests_queue = Queue()
results_queue = Queue()
stop_event = Event()
pending_tests = {}
pending_not_thread_safe_tests = {}
completed_tests = {}
failures = 0
errors = 0
start_time = time.time()
        # First run the tests which are not thread safe in the main process
for group in self._not_thread_safe:
if group not in tests.keys():
continue
group_tests = tests[group]
del tests[group]
self.log('Running tests in a main process: %s' % (group_tests))
pending_not_thread_safe_tests[group] = group_tests
result = self._tests_func(tests=group_tests, worker_index=None)
results_queue.put((group, result), block=False)
        for group, group_tests in tests.iteritems():
            tests_queue.put((group, group_tests), block=False)
            pending_tests[group] = group_tests
print "pending_tests", pending_tests
worker_count = self._worker_count
if worker_count == 'auto':
worker_count = len(pending_tests)
elif worker_count == 'cpu':
worker_count = multiprocessing.cpu_count()
print worker_count
if worker_count > len(pending_tests):
            # No need to spawn more workers than there are tests.
worker_count = len(pending_tests)
worker_args = (tests_queue, results_queue, stop_event)
workers = self._create_worker_pool(pool_size=worker_count,
target_func=self._run_tests_worker,
worker_args=worker_args)
for index, worker in enumerate(workers):
            self.log('Starting worker %s' % (index))
worker.start()
while pending_tests:
try:
try:
group, result = results_queue.get(timeout=self._parent_timeout,
block=True)
except Exception:
continue
try:
if group not in pending_not_thread_safe_tests:
pending_tests.pop(group)
else:
pending_not_thread_safe_tests.pop(group)
except KeyError:
self.log('Got a result for unknown group: %s' % (group))
else:
completed_tests[group] = result
self._print_result(result)
if result.failures or result.errors:
failures += len(result.failures)
errors += len(result.errors)
if self.failfast:
# failfast is enabled, kill all the active workers and stop
for worker in workers:
if worker.is_alive():
worker.terminate()
break
except Empty:
worker_left = False
for worker in workers:
if worker.is_alive():
worker_left = True
break
if not worker_left:
break
        # We are done, signal all the workers to stop
stop_event.set()
end_time = time.time()
self._exit(start_time, end_time, failures, errors)
def _run_tests_worker(self, index, tests_queue, results_queue, stop_event):
def pop_item():
group, tests = tests_queue.get(timeout=self._worker_timeout)
return group, tests
try:
try:
for group, tests in iter(pop_item, None):
if stop_event.is_set():
# We should stop
break
try:
result = None
result = self._tests_func(tests=tests, worker_index=index)
results_queue.put((group, result))
self.log('Worker %s has finished running tests %s' % (index, tests))
except Exception, e:
self.log('Running tests failed, reason: %s' % (str(e)))
result = TestResult().from_exception(e)
results_queue.put((group, result))
except Empty:
self.log('Worker %s timed out while waiting for tests to run' % (index))
finally:
tests_queue.close()
results_queue.close()
self.log('Worker %s is stopping' % (index))
def _pre_tests_func(self):
# This method gets called before _tests_func is called
pass
def _post_tests_func(self):
# This method gets called after _tests_func has completed and _print_result
# function is called
pass
    def _tests_func(self, tests, worker_index):
        raise NotImplementedError('_tests_func is not implemented')
def _print_result(self, result):
print >> sys.stderr, result.output
def _exit(self, start_time, end_time, failure_count, error_count):
time_difference = (end_time - start_time)
print >> sys.stderr, 'Total run time: %d seconds' % (time_difference)
try:
sys.exit(failure_count + error_count)
except Exception:
pass
def _group_by_app(self, test_labels):
"""
        Groups tests by app. This helps to partition tests so they can be run
        in separate worker processes.
        @TODO: Better partitioning of tests based on previous runs - measure
        test suite run time and partition tests so we can spawn as many workers
        as makes sense to get the maximum performance benefit.
"""
tests = {}
for test_label in test_labels:
            if '.' not in test_label:
app = test_label
else:
app = test_label.split('.')[0]
if not tests.get(app):
tests[app] = [ test_label ]
else:
tests[app].append(test_label)
return tests
def _group_by_file(self, test_names):
tests = {}
for test_name in test_names:
tests[test_name] = test_name
return tests
def _create_worker_pool(self, pool_size, target_func, worker_args):
workers = []
for index in range(0, pool_size):
args = (index,) + worker_args
worker = Process(target=target_func, args=args)
workers.append(worker)
return workers
def log(self, string):
if self.verbosity >= 3:
print string
class DjangoParallelTestSuiteRunner(ParallelTestSuiteRunner,
DjangoTestSuiteRunner):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
super(DjangoParallelTestSuiteRunner, self).__init__(verbosity, interactive,
failfast, **kwargs)
self._keyboard_interrupt_intercepted = False
self._parent_timeout = kwargs.get('parent_timeout', 6)
self._worker_timeout = kwargs.get('worker_timeout', 5)
self._database_names = self._get_database_names()
def run_tests(self, test_labels, extra_tests=None, **kwargs):
app_tests = self._group_by_app(test_labels)
return self._run_tests(tests=app_tests)
def run_suite(self, suite, **kwargs):
print "suite", suite
return DjangoParallelTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
def setup_databases(self, **kwargs):
if VERSION[0] == 1:
if VERSION[1] == 2 and VERSION[2] < 4:
return self.setup_databases_12(**kwargs)
elif VERSION[2] >= 4 or VERSION[1] == 3:
return self.setup_databases_13(**kwargs)
raise Exception('Unsupported Django Version: %s' % (str(VERSION)))
def setup_databases_12(self, **kwargs):
# Taken from django.test.simple
old_names = []
mirrors = []
worker_index = kwargs.get('worker_index', None)
for alias in connections:
connection = connections[alias]
database_name = 'test_%d_%s' % (worker_index, connection.settings_dict['NAME'])
connection.settings_dict['TEST_NAME'] = database_name
if connection.settings_dict['TEST_MIRROR']:
mirrors.append((alias, connection))
mirror_alias = connection.settings_dict['TEST_MIRROR']
connections._connections[alias] = connections[mirror_alias]
else:
old_names.append((connection, connection.settings_dict['NAME']))
connection.creation.create_test_db(verbosity=0, autoclobber=not self.interactive)
return old_names, mirrors
def setup_databases_13(self, **kwargs):
# Taken from django.test.simple
from django.test.simple import dependency_ordered
mirrored_aliases = {}
test_databases = {}
dependencies = {}
worker_index = kwargs.get('worker_index', None)
for alias in connections:
connection = connections[alias]
database_name = 'test_%d_%s' % (worker_index, connection.settings_dict['NAME'])
connection.settings_dict['TEST_NAME'] = database_name
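            # e.g. worker 2 with NAME 'myapp' gets the per-worker test database 'test_2_myapp'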
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], [])
)
item[1].append(alias)
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
connection = connections[aliases[0]]
old_names.append((connection, db_name, True))
test_db_name = connection.creation.create_test_db(verbosity=0, autoclobber=not self.interactive)
for alias in aliases[1:]:
connection = connections[alias]
if db_name:
old_names.append((connection, db_name, False))
connection.settings_dict['NAME'] = test_db_name
else:
old_names.append((connection, db_name, True))
connection.creation.create_test_db(verbosity=0, autoclobber=not self.interactive)
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']
return old_names, mirrors
def _tests_func(self, tests, worker_index):
self.setup_test_environment()
suite = self.build_suite(tests, [])
old_config = self.setup_databases(worker_index=worker_index)
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
result = TestResult().from_django_result(result)
return result
class DjangoParallelTestRunner(DjangoTestRunner):
def __init__(self, verbosity=2, failfast=True, **kwargs):
stream = BufferWritesDevice()
super(DjangoParallelTestRunner, self).__init__(stream=stream,
verbosity=verbosity,
failfast=failfast)
class TestResult(object):
dots = False
errors = None
failures = None
exception = None
output = None
def from_django_result(self, result_obj):
self.dots = result_obj.dots
self.errors = result_obj.errors
self.failures = self._format_failures(result_obj.failures)
self.output = result_obj.stream.read()
return self
def from_trial_result(self, result_obj):
self.errors = self._format_failures(result_obj.errors)
self.failures = self._format_failures(result_obj.failures)
self.output = result_obj.stream.read()
return self
def from_exception(self, exception):
self.exception = str(exception)
return self
def _format_failures(self, failures):
        # errors and failures attributes by default contain values which are not
        # picklable (class instances)
if not failures:
return failures
formatted = []
for failure in failures:
klass, message = failure
formatted.append((str(klass), message))
return formatted
|
preprocess.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 29 14:47:42 2016
@author: tomas
"""
import os
import json
import string
from Queue import Queue
from threading import Thread, Lock
import argparse
from math import floor
import h5py
import numpy as np
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.util import img_as_ubyte
from scipy.misc import imresize
import misc.dataset_loader as dl
from misc.embeddings import dct
import misc.utils as utils
"""
{
"id": [int], Unique identifier for this image,
"regions": [
{
"id": [int] Unique identifier for this region,
"image": [int] ID of the image to which this region belongs,
"height": [int] Height of the region in pixels,
"width": [int] Width of the region in pixels,
"label": [string] label for this region,
"x": [int] x-coordinate of the upper-left corner of the region,
"y": [int] y-coordinate of the upper-left corner of the region,
},
...
]
}
The output JSON file is an object with the following elements:
- token_to_idx: Dictionary mapping strings to integers for encoding tokens,
in 1-indexed format.
- wtoi: maps words to integer
- itow: Inverse of the above.
The output HDF5 file has the following format to describe N images with
M total regions:
- images: uint8 array of shape (N, 1, image_size, image_size) of pixel data,
in BDHW format. Images will be resized so their longest edge is image_size
pixels long, aligned to the upper left corner, and padded with zeros.
The actual size of each image is stored in the image_heights and image_widths
fields.
- image_heights: int32 array of shape (N,) giving the height of each image.
- image_widths: int32 array of shape (N,) giving the width of each image.
- original_heights: int32 array of shape (N,) giving the original height of
each image.
- original_widths: int32 array of shape (N,) giving the original width of
each image.
- boxes: int32 array of shape (M, 4) giving the coordinates of each bounding box.
Each row is (xc, yc, w, h) where yc and xc are center coordinates of the box,
and are one-indexed.
- lengths: int32 array of shape (M,) giving lengths of label sequence for each box
- labels: int32 array of shape (M,) giving the integer label for each region.
- dct_word_embeddings: float32 array of shape (M, 108). DCToW Embedding of the ground truth
label for a ground truth box
- phoc_word_embeddings: float32 array of shape (M, 540). PHOC Embedding of the ground truth
label for a ground truth box
- img_to_first_box: int32 array of shape (N,). If img_to_first_box[i] = j then
labels[j] and boxes[j] give the first annotation for image i
(using one-indexing).
- img_to_last_box: int32 array of shape (N,). If img_to_last_box[i] = j then
labels[j] and boxes[j] give the last annotation for image i
(using one-indexing).
- box_to_img: int32 array of shape (M,). If box_to_img[i] = j then
regions[i] and labels[i] refer to images[j] (using one-indexing).
- region_proposals: int32 array of shape (R, 4) giving the coordinates of each region proposal.
Each row is (xc, yc, w, h) where yc and xc are center coordinates of the box, and are one-indexed.
- img_to_first_rp: int32 array of shape (N,). The same as img_to_first_box but for region proposals
instead of ground truth boxes.
- img_to_last_rp: int32 array of shape (N,). The same as img_to_last_box but for region proposals
instead of ground truth boxes.
"""
def build_vocab_dict(vocab):
token_to_idx, idx_to_token = {}, {}
next_idx = 1
for token in vocab:
token_to_idx[token] = next_idx
idx_to_token[next_idx] = token
next_idx = next_idx + 1
return token_to_idx, idx_to_token
def encode_word_embeddings(data, wtoe):
"""
Encode each label as a word embedding
"""
we = []
for datum in data:
for r in datum['regions']:
we.append(wtoe[r['label']])
return np.array(we)
def encode_labels(data, wtoi):
"""
Encode each label as an integer
"""
labels = []
for datum in data:
for r in datum['regions']:
labels.append(wtoi[r['label']])
return np.array(labels)
def encode_boxes(data, original_heights, original_widths, image_size, max_image_size, box_type='gt_boxes'):
all_boxes = []
xwasbad = 0
ywasbad = 0
wwasbad = 0
hwasbad = 0
for i, datum in enumerate(data):
H, W = original_heights[i], original_widths[i]
scale = float(image_size) / max(H, W)
#Needed for not so tightly labeled datasets, like washington
if box_type == 'region_proposals':
datum[box_type] = utils.pad_proposals(datum[box_type], (H, W), 10)
for box in datum[box_type]:
x, y = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
x, y = round(scale*(x-1)+1), round(scale*(y-1)+1)
w, h = round(scale*w), round(scale*h)
# clamp to image
if x < 1: x = 1
if y < 1: y = 1
if x > max_image_size[1] - 1:
x = max_image_size[1] - 1
xwasbad += 1
if y > max_image_size[0] - 1:
y = max_image_size[0] - 1
ywasbad += 1
if x + w > max_image_size[1]:
w = max_image_size[1] - x
wwasbad += 1
if y + h > max_image_size[0]:
h = max_image_size[0] - y
hwasbad += 1
                # no early exit here: every box must be emitted so boxes stay aligned with labels
b = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32) # also convert to center-coord oriented
assert b[2] > 0 # width height should be positive numbers
assert b[3] > 0
all_boxes.append(b)
print 'number of bad x,y,w,h: ', xwasbad, ywasbad, wwasbad, hwasbad
return np.vstack(all_boxes)
def build_img_idx_to_box_idxs(data, boxes='regions'):
img_idx = 1
box_idx = 1
num_images = len(data)
img_to_first_box = np.zeros(num_images, dtype=np.int32)
img_to_last_box = np.zeros(num_images, dtype=np.int32)
for datum in data:
img_to_first_box[img_idx - 1] = box_idx
for region in datum[boxes]:
box_idx += 1
img_to_last_box[img_idx - 1] = box_idx - 1 # -1 to make these inclusive limits
img_idx += 1
return img_to_first_box, img_to_last_box
def build_filename_dict(data):
    # Build bidirectional mappings between image ids (filenames) and 1-based indices
next_idx = 1
filename_to_idx, idx_to_filename = {}, {}
for img in data:
filename = img['id']
filename_to_idx[filename] = next_idx
idx_to_filename[next_idx] = filename
next_idx += 1
return filename_to_idx, idx_to_filename
def encode_filenames(data, filename_to_idx):
filename_idxs = []
for img in data:
filename = img['id']
idx = filename_to_idx[filename]
for region in img['regions']:
filename_idxs.append(idx)
return np.asarray(filename_idxs, dtype=np.int32)
def add_images(data, h5_file, image_size, max_image_size, num_workers=5):
num_images = len(data)
shape = (num_images, 1, max_image_size[0], max_image_size[1])
image_dset = h5_file.create_dataset('images', shape, dtype=np.uint8)
original_heights = np.zeros(num_images, dtype=np.int32)
original_widths = np.zeros(num_images, dtype=np.int32)
image_heights = np.zeros(num_images, dtype=np.int32)
image_widths = np.zeros(num_images, dtype=np.int32)
lock = Lock()
q = Queue()
for i, img in enumerate(data):
q.put((i, img['id']))
def worker():
while True:
i, filename = q.get()
img = imread(filename)
if img.ndim == 3:
img = img_as_ubyte(rgb2gray(img))
H0, W0 = img.shape[0], img.shape[1]
img = imresize(img, float(image_size) / max(H0, W0))
H, W = img.shape[0], img.shape[1]
img = np.invert(img)
lock.acquire()
if i % 1000 == 0:
print 'Writing image %d / %d' % (i, len(data))
original_heights[i] = H0
original_widths[i] = W0
image_heights[i] = H
image_widths[i] = W
image_dset[i, :, :H, :W] = img
lock.release()
q.task_done()
print('adding images to hdf5.... (this might take a while)')
for i in xrange(num_workers):
t = Thread(target=worker)
t.daemon = True
t.start()
q.join()
h5_file.create_dataset('image_heights', data=image_heights)
h5_file.create_dataset('image_widths', data=image_widths)
h5_file.create_dataset('original_heights', data=original_heights)
h5_file.create_dataset('original_widths', data=original_widths)
def encode_splits(data):
""" Encode splits as intetgers and return the array. """
lookup = {'train': 0, 'val': 1, 'test': 2}
return [lookup[datum['split']] for datum in data]
#reset = False
#dataset = 'washington'
#root = 'data/dbs/'
#fold = 1
#augment = False
#suffix = ''
def create_dataset(dataset, root, suffix='', augment=False, fold=1, reset=False):
num_workers = 5
image_size = 1720
alphabet = string.ascii_lowercase + string.digits
if not os.path.exists(root):
os.makedirs(root)
dataset_full = dataset + '_fold%d' % fold
outdir = root + dataset_full + '/'
h5_output = root + dataset_full
json_output = root + dataset_full
if suffix:
h5_output += '_' + suffix
json_output += '_' + suffix
h5_output += '.h5'
json_output += '.json'
# read in the data
data = getattr(dl, 'load_' + dataset)(fold)
sizes = []
means = []
for datum in data:
img = imread(datum['id'])
if img.ndim == 3:
img = img_as_ubyte(rgb2gray(img))
if datum['split'] == 'train':
means.append(np.invert(img).mean())
sizes.append(img.shape)
image_mean = np.mean(means)
sizes = np.array(sizes)
max_image_size = sizes.max(axis=0)
if augment:
num_images = 5000
num_train = len([datum for datum in data if datum['split'] == 'train'])
tparams = {}
        # get approximately the same number of images
tparams['samples_per_image'] = int(np.round(float(num_images / 2) / num_train))
tparams['shear'] = (-5, 30)
tparams['order'] = 1 #bilinear
tparams['selem_size'] = (3, 4) #max size for square kernel for erosion, dilation
inplace_data = dl.inplace_augment(data, outdir, tparams=tparams, reset=reset) #original data is kept here
nps = num_images - tparams['samples_per_image'] * num_train
full_page_data = dl.fullpage_augment(data, outdir, nps, reset=reset) #only augmented data is added here
data = inplace_data + full_page_data
# create the output hdf5 file handle
f = h5py.File(h5_output, 'w')
# add several fields to the file: images, and the original/resized widths/heights
add_images(data, f, image_size, max_image_size, num_workers)
f.create_dataset('image_mean', data=np.array([image_mean]))
# add split information
split = encode_splits(data)
f.create_dataset('split', data=split)
# build vocabulary
vocab, _ = utils.build_vocab(data)
wtoi, itow = build_vocab_dict(vocab) # both mappings are dicts
# encode dct embeddings
dct_wtoe = {w:dct(w, 3, alphabet) for w in vocab}
dct_word_embeddings = encode_word_embeddings(data, dct_wtoe)
f.create_dataset('dct_word_embeddings', data=dct_word_embeddings)
# encode boxes
original_heights = np.asarray(f['original_heights'])
original_widths = np.asarray(f['original_widths'])
gt_boxes = encode_boxes(data, original_heights, original_widths, image_size, max_image_size)
f.create_dataset('boxes', data=gt_boxes)
# write labels
labels = encode_labels(data, wtoi)
f.create_dataset('labels', data=labels)
# integer mapping between image ids and region_proposals ids
utils.filter_region_proposals(data, original_heights, original_widths, image_size)
region_proposals = encode_boxes(data, original_heights, original_widths,
image_size, max_image_size, 'region_proposals')
f.create_dataset('region_proposals', data=region_proposals)
img_to_first_rp, img_to_last_rp = build_img_idx_to_box_idxs(data, 'region_proposals')
f.create_dataset('img_to_first_rp', data=img_to_first_rp)
f.create_dataset('img_to_last_rp', data=img_to_last_rp)
# integer mapping between image ids and box ids
img_to_first_box, img_to_last_box = build_img_idx_to_box_idxs(data)
f.create_dataset('img_to_first_box', data=img_to_first_box)
f.create_dataset('img_to_last_box', data=img_to_last_box)
# filename_to_idx, idx_to_filename = build_filename_dict(data)
# box_to_img = encode_filenames(data, filename_to_idx)
# f.create_dataset('box_to_img', data=box_to_img)
f.close()
# and write the additional json file
json_struct = {
'wtoi': wtoi,
'itow': itow}
# 'filename_to_idx': filename_to_idx,
# 'idx_to_filename': idx_to_filename}
with open(json_output, 'w') as f:
json.dump(json_struct, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir',
default='data/dbs/',
help='Path to where your data is located')
parser.add_argument('--cross_val',
default=0,
help='Whether or not to use 4-fold cross validation, default 0')
args = parser.parse_args()
folds = 1
if args.cross_val:
folds = 4
for fold in range(1, folds + 1):
create_dataset('washington', args.root_dir, fold=fold)
create_dataset('washington', args.root_dir, suffix='augmented', augment=True, fold=fold)
|
process.py
|
from multiprocessing import Process, Queue
from promise import Promise
from .utils import process
def queue_process(q):
promise, fn, args, kwargs = q.get()
process(promise, fn, args, kwargs)
class ProcessExecutor(object):
def __init__(self):
self.processes = []
self.q = Queue()
def wait_until_finished(self):
while self.processes:
processes = self.processes
self.processes = []
[_process.join() for _process in processes]
self.q.close()
self.q.join_thread()
def clean(self):
self.processes = []
def execute(self, fn, *args, **kwargs):
promise = Promise()
self.q.put([promise, fn, args, kwargs], False)
        _process = Process(target=queue_process, args=(self.q,))  # args must be a tuple
_process.start()
self.processes.append(_process)
return promise
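# Minimal usage sketch (illustrative only; assumes 'add' is a picklable, top-level function):
#
#     def add(a, b):
#         return a + b
#
#     executor = ProcessExecutor()
#     promise = executor.execute(add, 1, 2)  # hands add(1, 2) to a child process
#     executor.wait_until_finished()         # joins all spawned processes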
|
broadcastServer.py
|
# simple server for receiving udp packets
# only purpose is to test functionality manually
# large chunk copied off some online tutorial
import socket
def do_some_stuffs_with_input(input_string):
"""
This is where all the processing happens.
Let's just read the string backwards
"""
print("Processing the input ...")
return input_string[::-1]
def client_thread(data, ip, port, MAX_BUFFER_SIZE = 4096):
# the input is in bytes, so decode it
# input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)
# MAX_BUFFER_SIZE is how big the message can be
    # this tests whether the received data fits within it
input_from_client_bytes = data
import sys
siz = sys.getsizeof(input_from_client_bytes)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
# decode input and strip the end of line
input_from_client = input_from_client_bytes.decode("utf8").rstrip()
res = do_some_stuffs_with_input(input_from_client)
print("Result of processing {} is: {}".format(input_from_client, res))
soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
soc.sendto(res.encode("utf8"),(ip, int(port))) # we must encode the string to bytes
print('Connection ' + ip + ':' + port + " ended")
def start_server():
soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# this is for easy starting/killing the app
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Broadcast Socket created')
try:
ip = '0.0.0.0'
soc.bind((ip, 28196)) # match this with BROADCAST_PORT in common.consts
print('Socket bind complete')
except socket.error as msg:
import sys
print('Bind failed. Error : ' + str(sys.exc_info()) + " msg: " + str(msg))
sys.exit()
print(soc.getsockname())
from threading import Thread
# this will make an infinite loop needed for
# not reseting server for every client
while True:
data , addr = soc.recvfrom(4096)
ip, port = str(addr[0]), str(addr[1])
        print('Received %s from %s' % (data, addr))
        try:
            Thread(target=client_thread, args=(data, ip, port)).start()
        except Exception:
            print("Terrible error!")
import traceback
traceback.print_exc()
soc.close()
start_server()
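# Minimal manual test sketch (illustrative only): send a UDP datagram to the port
# bound above and read back the reversed reply the server sends to our source address.
#
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     s.sendto(b'hello server', ('127.0.0.1', 28196))
#     print(s.recvfrom(4096))  # expect b'revres olleh' plus the server's address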
|
3_exam.py
|
import threading
from threading import Lock
# Print one line while holding the lock so output from different threads doesn't interleave
def print_ln(lk, number, text):
lk.acquire()
print(f'{number}||{text}')
lk.release()
def main():
    # Create a lock to serialize access to stdout
lk = Lock()
    # Create a thread for each word and pass it the function to execute along with its arguments
# text = [input().split(" ")]
text = "das asd fsd fzd".split(" ")
for i in range(4):
thr = threading.Thread(target=print_ln, args=(lk, i + 1, text[i]))
        # Start the thread
thr.start()
        # Wait for the thread to finish so the lines print in order and the program doesn't exit early
thr.join()
if __name__ == '__main__':
main()
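# Expected output (deterministic, because each thread is joined before the next starts):
# 1||das
# 2||asd
# 3||fsd
# 4||fzd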
|
jobber.py
|
import itertools
import numpy as np
import json
import scipy.special as scs
import multiprocessing as mp
def group_cost(combinations):
cost = 0
for case in combinations:
cost += np.prod([configuration_count(c) for c in case])
return cost
def combinations_in_group(T, R):
combinations = []
for case in itertools.product(range(R+1), repeat=T):
if R in case:
combinations.append(case)
return combinations
def configuration_count(resolution):
if resolution == 0:
return 1
return 2**(resolution - 1)
def costfunc(r, t):
rest = range(0, r+1)
combinations = itertools.product(rest, repeat=t)
combinations = list(combinations)
combinations = [list(x) for x in combinations if r in x]
return len(combinations)
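# Illustrative example: costfunc(2, 2) counts the length-2 tuples over {0, 1, 2}
# that contain a 2, i.e. (0,2), (1,2), (2,0), (2,1), (2,2), so it returns 5.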
def make_job(molidx, line, torsion_bodies, torsion_resolutions, perworkpackage=3000):
# if molidx != 6091: return
# if molidx != 0: return
line = line.strip().split()
N_torsions = line[1]
N_torsions = int(N_torsions)
torsion_idx = list(range(N_torsions))
# guido cost func
costmatrix = np.zeros((len(torsion_resolutions), len(torsion_bodies)), dtype=int)
for i, R in enumerate(torsion_resolutions):
for j, T in enumerate(torsion_bodies):
cost = costfunc(R, T)
cost = float(cost)
cost = int(cost)
costmatrix[i,j] = cost
# resolutions = range(torsion_resolutions+1)
# resolutions = list(resolutions)
total = 0
fjob = open("jobs/"+str(molidx) + ".wp", 'w')
jobs = []
for ib, body in enumerate(torsion_bodies):
combinations = itertools.combinations(torsion_idx, body)
for combination in combinations:
combination = [str(x) for x in combination]
combination = " ".join(combination)
for ir, resolution in enumerate(torsion_resolutions):
cost = costmatrix[ir, ib]
job = [str(molidx), combination, str(resolution), str(cost)]
job = ",".join(job)
jobs.append(job)
# wrap jobs in workpackages
workpackages = []
counter = 0
current = []
perworkpackage = 3000
for line in jobs:
line = line.strip()
info = line.split(",")
cost = info[-1]
cost = int(cost)
counter += cost
# current += [",".join(info[:-1])]
current += [line]
if counter > perworkpackage:
workpackages.append(";".join(current))
counter = 0
current = []
workpackages.append(";".join(current))
workpackages = "\n".join(workpackages)
fjob.write(workpackages)
fjob.close()
return
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--torsion-bodies', nargs="+", type=int, help='', metavar='N', default=[2])
parser.add_argument('-r', '--torsion-resolutions', nargs="+", type=int, help='', metavar='N', default=[2])
parser.add_argument('-j', '--workers', type=int, help='', metavar='N', default=1)
args = parser.parse_args()
names = []
lines = []
with open("list_torsions", 'r') as f:
for molidx, line in enumerate(f):
names.append(molidx)
lines.append(line)
if args.workers == 1:
for molidx, line in zip(names, lines):
make_job(molidx, line, args.torsion_bodies, args.torsion_resolutions)
    else:
        # Fork-based parallelism (assumes a fork start method, so the lambda never
        # needs to be pickled); process every molecule, at most args.workers at a time
        worker = lambda x, y: make_job(x, y, args.torsion_bodies, args.torsion_resolutions)
        for start in range(0, len(names), args.workers):
            batch = range(start, min(start + args.workers, len(names)))
            processes = [mp.Process(target=worker, args=(names[i], lines[i])) for i in batch]
            for p in processes: p.start()  # Fork
            for p in processes: p.join()   # Join
return
if __name__ == '__main__':
main()
|
SnakeServer.py
|
import socket
import pickle
import random
import threading
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
def get_ip():
try:
h_name = socket.gethostname()
IP = socket.gethostbyname(h_name)
except Exception:
IP = '127.0.0.1'
return IP
class SnakeServer:
def __init__(self):
self.running = True
def GameSession(self, Connections):
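        # Relay loop: receive each player's pickled snake segments, keep a single
        # shared food position on a 20-pixel grid, and send every connection the
        # other player's data so both clients stay in sync.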
food_pos = []
data_snake_1 = []
data_snake_2 = []
no_food = True
while self.running:
data_snake_1 = Connections[0].recv(1024)
data_snake_2 = Connections[1].recv(1024)
if not data_snake_1:
break
if not data_snake_2:
break
data_snake_1 = pickle.loads(data_snake_1)
data_snake_2 = pickle.loads(data_snake_2)
if not food_pos:
no_food = True
else:
no_food = False
while not food_pos:
x = random.randint(2, 28) * 20
y = random.randint(2, 28) * 20
if not [x, y] in data_snake_1 and not [x, y] in data_snake_2:
food_pos = [x, y]
data_snake_1.append(food_pos)
data_snake_2.append(food_pos)
if data_snake_1[-2] == food_pos or data_snake_2[-2] == food_pos:
food_pos = []
data_snake_1.append([-20, -20])
data_snake_2.append([-20, -20])
if food_pos and no_food == False:
data_snake_1.append(food_pos)
data_snake_2.append(food_pos)
data_snake_1 = pickle.dumps(data_snake_1)
data_snake_2 = pickle.dumps(data_snake_2)
Connections[0].sendall(data_snake_2)
Connections[1].sendall(data_snake_1)
def Start(self):
self.running = True
HOST = get_ip()
PORT = 65432
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((HOST, PORT))
self.s.listen(5)
conn_list = []
threads = []
        print("Server up and running")
while self.running:
for i in range(2):
try:
if i:
                        print("Waiting for the second player")
conn, addr = self.s.accept()
                    print('Connected', addr[0])
conn_list.append(conn)
                except OSError:
self.running = False
threads.append(threading.Thread(
target=self.GameSession, args=(conn_list,)))
threads[-1].start()
conn_list = []
def Stop(self):
self.s.close()
self.running = False
class ServerWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(ServerWindow, self).__init__(*args, **kwargs)
self.server_is_online = False
self.ss = SnakeServer()
# self.server = threading.Thread(target=self.ss.Start, args=())
self.setWindowTitle("Server")
self.setFixedWidth(500)
self.setFixedHeight(500)
self.server_status = QLabel(" Server Status: Offline", self)
self.server_status.setStyleSheet("background-color: red")
self.server_status.resize(150, 30)
self.server_status.move(0, 0)
ip_label = QLabel("Local IP:" + get_ip(),self)
print(get_ip())
ip_label.move(200,0)
start_btn = QPushButton("Start", self)
start_btn.setToolTip("Start your Server")
start_btn.resize(150, 100)
start_btn.move(0, 200)
start_btn.setStyleSheet("QPushButton"
"{"
"background-color : green;"
"}")
start_btn.clicked.connect(self.On_start)
stop_btn = QPushButton("Stop", self)
stop_btn.setToolTip("Stop your Server")
stop_btn.resize(150, 100)
stop_btn.move(150, 200)
stop_btn.setStyleSheet("QPushButton"
"{"
"background-color : red;"
"}")
stop_btn.clicked.connect(self.On_stop)
def PlayerList(self):
pass
def On_start(self):
if not self.server_is_online:
self.server_status.setStyleSheet("background-color: green")
self.server_status.setText(" Server Status: Online")
self.server = threading.Thread(target=self.ss.Start, args=())
self.server.start()
self.server_is_online = True
else:
            print("Server is already running")
def On_stop(self):
if self.server_is_online:
self.server_status.setStyleSheet("background-color: red")
self.server_status.setText(" Server Status: Offline")
print("Server is stopping...")
self.ss.Stop()
self.server.join()
print("Server Stopped")
self.server_is_online = False
else:
            print("Server is already stopped")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = ServerWindow()
window.show()
app.exec_()
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-pytorch": {
"inputs": "image",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
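# Filled in by the process_latencies() loadgen callback; main() falls back to
# runner.result_timing when loadgen does not report latencies.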
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
parser.add_argument("--debug", action="store_true", help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
        parser.error("valid scenarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
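    # Synchronous runner: enqueue() runs every batch inline on the caller's thread.
    # QueueRunner below overrides enqueue()/handle_tasks() to hand batches to a
    # pool of worker threads instead.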
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
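    # End-to-end flow: pick backend and dataset, warm up with a few predictions,
    # build the loadgen SUT/QSL for the chosen scenario, run the test, and write
    # the collected statistics to results.json (when --output is set).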
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = output_dir
log_output_settings.copy_summary_to_stdout = False
log_settings = lg.LogSettings()
log_settings.enable_trace = args.debug
log_settings.log_output = log_output_settings
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
files.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab-specific file helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import os
import socket
import threading
import uuid
import IPython
import portpicker
import six
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from google.colab import output
def upload():
"""Renders widget to upload local (to the browser) files to the kernel.
Blocks until the files are available.
Returns:
A map of the form {<filename>: <file contents>} for all uploaded files.
"""
upload_id = str(uuid.uuid4())
input_id = 'files-' + upload_id
output_id = 'result-' + upload_id
IPython.display.display(
IPython.core.display.HTML("""
<input type="file" id="{input_id}" name="files[]" multiple disabled />
<output id="{output_id}">
Upload widget is only available when the cell has been executed in the
current browser session. Please rerun this cell to enable.
</output>
<script src="/nbextensions/google.colab/files.js"></script> """.format(
input_id=input_id, output_id=output_id)))
# First result is always an indication that the file picker has completed.
result = output.eval_js(
'google.colab._files._uploadFiles("{input_id}", "{output_id}")'.format(
input_id=input_id, output_id=output_id))
files = collections.defaultdict(six.binary_type)
# Mapping from original filename to filename as saved locally.
local_filenames = dict()
while result['action'] != 'complete':
result = output.eval_js(
'google.colab._files._uploadFilesContinue("{output_id}")'.format(
output_id=output_id))
if result['action'] != 'append':
# JS side uses a generator of promises to process all of the files- some
# steps may not produce data for the Python side, so just proceed onto the
# next message.
continue
data = base64.b64decode(result['data'])
filename = result['file']
files[filename] += data
local_filename = local_filenames.get(filename)
if not local_filename:
local_filename = _get_unique_filename(filename)
local_filenames[filename] = local_filename
print('Saving {filename} to {local_filename}'.format(
filename=filename, local_filename=local_filename))
with open(local_filename, 'ab') as f:
f.write(data)
return dict(files)
def _get_unique_filename(filename):
if not os.path.lexists(filename):
return filename
counter = 1
while True:
path, ext = os.path.splitext(filename)
new_filename = '{} ({}){}'.format(path, counter, ext)
if not os.path.lexists(new_filename):
return new_filename
counter += 1
class _V6Server(socketserver.TCPServer):
address_family = socket.AF_INET6
class _FileHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""SimpleHTTPRequestHandler with a couple tweaks."""
def translate_path(self, path):
# Client specifies absolute paths.
return path
def log_message(self, fmt, *args):
    # Suppress logging since the server runs in the background. Any errors will be
    # reported via the handler.
pass
def end_headers(self):
# Do not cache the response in the notebook, since it may be quite large.
self.send_header('x-colab-notebook-cache-control', 'no-cache')
SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
def download(filename):
"""Downloads the file to the user's local disk via a browser download action.
Args:
filename: Name of the file on disk to be downloaded.
"""
started = threading.Event()
port = portpicker.pick_unused_port()
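  # Serve the file from a one-shot local HTTP server on an unused port, then let
  # the browser fetch it and trigger the download via a temporary <a> element.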
def server_entry():
httpd = _V6Server(('::', port), _FileHandler)
started.set()
# Handle a single request then exit the thread.
httpd.handle_request()
thread = threading.Thread(target=server_entry)
thread.start()
started.wait()
output.eval_js(
"""
(async function() {
const response = await fetch('https://localhost:%(port)d%(path)s');
if (!response.ok) {
throw new Error('Failed to download: ' + response.statusText);
}
const blob = await response.blob();
const a = document.createElement('a');
a.href = window.URL.createObjectURL(blob);
a.download = '%(name)s';
a.click();
})();
""" % {
'port': port,
'path': os.path.abspath(filename),
'name': os.path.basename(filename),
})
|
basic_mapping.py
|
import uuid, datetime, multiprocessing
import pandas as pd
from functree import app, constants, models, tree, analysis, services
def from_table(form):
methods=['mean', 'sum']
if form.modulecoverage.data and services.DefinitionService.has_definition(form.target.data):
methods.append('modulecoverage')
result = calc_abundances(f=form.input_file.data, target=form.target.data, methods=methods, distribute=form.distribute.data)
profile_id = constants.NO_MATCHED_HIERARCHIES
# if rows were mapped
if len(result['profile']) > 0:
colors = []
if form.color_file.data:
            colors = pd.read_csv(form.color_file.data, header=None, delimiter='\t').values.tolist()
utcnow = datetime.datetime.utcnow()
        # This insert takes about 4 seconds.
        # Maybe it is the document size, or maybe insert() should be used instead of save().
profile_id = models.Profile(
profile_id=uuid.uuid4(),
profile=result['profile'],
series=result['series'],
columns=result['columns'],
colors=colors,
target=form.target.data,
description=form.description.data,
added_at=utcnow,
expire_at=utcnow + datetime.timedelta(days=app.config['FUNCTREE_PROFILE_TTL_DAYS']),
private=form.private.data
).save().profile_id
return profile_id
def calc_abundances(f, target, methods, distribute):
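    # One worker process per abundance method (plus module coverage when requested);
    # each stores its result frame in the shared Manager dict, which is read back
    # below keyed by method name to assemble the per-entry profile.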
df = analysis.load_input(f)
# transform external annotations to kegg KOs
    # runs in about 1.66 seconds
if target.lower() in ["kegg", "foam", "enteropathway"]:
df = analysis.map_external_annotations(df)
# runs in 2 seconds
# Different querying strategies did not make any difference so far
root = models.Tree.objects(source=target).only('tree').first()['tree']
nodes = tree.get_nodes(root)
entry_to_layer = dict(map(lambda x: (x['entry'], x['layer']), nodes))
manager = multiprocessing.Manager()
shared_data = manager.dict()
jobs = list()
    # This runs in about 6 seconds
for method in methods:
if not method == "modulecoverage":
            if distribute:
job = multiprocessing.Process(target=analysis.calc_distributed_abundances, args=(df, tree.to_graph(root), method, shared_data), daemon=False)
else:
job = multiprocessing.Process(target=analysis.calc_abundances, args=(df, nodes, method, shared_data), daemon=False)
else:
job = multiprocessing.Process(target=analysis.module_coverage.calc_coverages, args=(df, target, shared_data), daemon=False)
job.start()
jobs.append(job)
for job in jobs:
job.join()
results = dict(shared_data)
profile = []
# load KO based entries
if len(results) > 0:
entries=list(list(results.values())[0].index)
if "modulecoverage" in methods:
entries += list(list(results.values())[2].index)
entries = list(set(entries))
# This is taking 2 to 4 seconds to run
for entry in entries:
#values = [results[method].ix[entry].tolist() for method in methods]
values = []
for method in methods:
if entry in results[method].index:
                    values.append(results[method].loc[entry].tolist())
else:
values.append([0] * df.columns.size)
profile.append({'entry': entry, 'layer': analysis.get_layer(entry, entry_to_layer), 'values': values})
data = {
'profile': profile,
'series': methods,
'columns': [df.columns.tolist() for i in range(len(methods))]
}
return data
|
dabam_height_profile.py
|
import sys
import time
import numpy
import threading
from PyQt5.QtCore import QRect, Qt
from PyQt5.QtWidgets import QApplication, QMessageBox, QScrollArea, QTableWidget, QTableWidgetItem, QHeaderView, QAbstractItemView, QWidget, QLabel, QSizePolicy
from PyQt5.QtGui import QTextCursor,QFont, QPalette, QColor, QPainter, QBrush, QPen, QPixmap
from matplotlib import cm
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
import orangecanvas.resources as resources
from orangewidget import gui, widget
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import EmittingStream
from orangecontrib.wiser.util.wise_objects import WiserPreInputData
from srxraylib.metrology import profiles_simulation, dabam
from Shadow import ShadowTools as ST
class OWdabam_height_profile(OWWidget):
name = "DABAM Height Profile"
id = "dabam_height_profile"
description = "Calculation of mirror surface error profile"
icon = "icons/dabam.png"
author = "Luca Rebuffi"
maintainer_email = "srio@esrf.eu; luca.rebuffi@elettra.eu"
priority = 2
category = ""
keywords = ["dabam_height_profile"]
outputs = [{"name": "PreInput",
"type": WiserPreInputData,
"doc": "PreInput",
"id": "PreInput"}]
want_main_area = 1
want_control_area = 1
MAX_WIDTH = 1320
MAX_HEIGHT = 700
IMAGE_WIDTH = 860
IMAGE_HEIGHT = 645
CONTROL_AREA_WIDTH = 405
TABS_AREA_HEIGHT = 618
xx = None
yy = None
entry_number = Setting(1)
shape=Setting(0)
slope_error_from = Setting(0.0)
slope_error_to = Setting(1.5)
dimension_y_from = Setting(0.0)
dimension_y_to = Setting(200.0)
use_undetrended = Setting(0)
center_y = Setting(1)
modify_y = Setting(0)
new_length = Setting(200.0)
filler_value = Setting(0.0)
renormalize_y = Setting(1)
error_type_y = Setting(0)
rms_y = Setting(0.9)
dabam_profile_index = Setting(1)
heigth_profile_file_name = Setting('mirror.dat')
tab=[]
def __init__(self):
super().__init__()
self.runaction = widget.OWAction("Calculate Height Profile", self)
self.runaction.triggered.connect(self.calculate_heigth_profile_ni)
self.addAction(self.runaction)
self.runaction = widget.OWAction("Generate Height Profile File", self)
self.runaction.triggered.connect(self.generate_heigth_profile_file_ni)
self.addAction(self.runaction)
geom = QApplication.desktop().availableGeometry()
self.setGeometry(QRect(round(geom.width() * 0.05),
round(geom.height() * 0.05),
round(min(geom.width() * 0.98, self.MAX_WIDTH)),
round(min(geom.height() * 0.95, self.MAX_HEIGHT))))
self.setMaximumHeight(self.geometry().height())
self.setMaximumWidth(self.geometry().width())
# DABAM INITIALIZATION
self.server = dabam.dabam()
self.server.set_input_silent(True)
gui.separator(self.controlArea)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Calculate Height\nProfile", callback=self.calculate_heigth_profile)
button.setFixedHeight(45)
button = gui.button(button_box, self, "Generate Height\nProfile File", callback=self.generate_heigth_profile_file)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
button = gui.button(button_box, self, "Reset Fields", callback=self.call_reset_settings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
gui.separator(self.controlArea)
tabs_setting = oasysgui.tabWidget(self.controlArea)
tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
tab_input = oasysgui.createTabPage(tabs_setting, "DABAM Search Setting")
tab_gener = oasysgui.createTabPage(tabs_setting, "DABAM Generation Setting")
tab_out = oasysgui.createTabPage(tabs_setting, "Output")
manual_box = oasysgui.widgetBox(tab_input, "Manual Entry", addSpace=True, orientation="vertical")
oasysgui.lineEdit(manual_box, self, "entry_number", "Entry Number",
labelWidth=300, valueType=int, orientation="horizontal")
gui.separator(manual_box)
button = gui.button(manual_box, self, "Retrieve Profile", callback=self.retrieve_profile)
button.setFixedHeight(35)
button.setFixedWidth(self.CONTROL_AREA_WIDTH-35)
input_box = oasysgui.widgetBox(tab_input, "Search Parameters", addSpace=True, orientation="vertical")
gui.comboBox(input_box, self, "shape", label="Mirror Shape", labelWidth=300,
items=["All", "Plane", "Cylindrical", "Elliptical", "Toroidal", "Spherical"],
sendSelectedValue=False, orientation="horizontal")
gui.separator(input_box)
input_box_1 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_from", "Slope Error From (" + u"\u03BC" + "rad)",
labelWidth=150, valueType=float, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_to", "To (" + u"\u03BC" + "rad)",
labelWidth=60, valueType=float, orientation="horizontal")
input_box_2 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
self.le_dimension_y_from = oasysgui.lineEdit(input_box_2, self, "dimension_y_from", "Mirror Length From",
labelWidth=150, valueType=float, orientation="horizontal")
self.le_dimension_y_to = oasysgui.lineEdit(input_box_2, self, "dimension_y_to", "To",
labelWidth=60, valueType=float, orientation="horizontal")
table_box = oasysgui.widgetBox(tab_input, "Search Results", addSpace=True, orientation="vertical", height=250)
self.overlay_search = Overlay(table_box, self.search_profiles)
self.overlay_search.hide()
button = gui.button(input_box, self, "Search", callback=self.overlay_search.show)
button.setFixedHeight(35)
button.setFixedWidth(self.CONTROL_AREA_WIDTH-35)
gui.comboBox(table_box, self, "use_undetrended", label="Use Undetrended Profile", labelWidth=300,
items=["No", "Yes"], callback=self.table_item_clicked, sendSelectedValue=False, orientation="horizontal")
gui.separator(table_box)
self.scrollarea = QScrollArea()
self.scrollarea.setMinimumWidth(self.CONTROL_AREA_WIDTH-35)
table_box.layout().addWidget(self.scrollarea, alignment=Qt.AlignHCenter)
self.table = QTableWidget(1, 5)
self.table.setStyleSheet("background-color: #FBFBFB;")
self.table.setAlternatingRowColors(True)
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.table.verticalHeader().setVisible(False)
self.table.setColumnWidth(0, 40)
self.table.setColumnWidth(1, 70)
self.table.setColumnWidth(2, 70)
self.table.setColumnWidth(3, 85)
self.table.setColumnWidth(4, 80)
self.table.resizeRowsToContents()
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.itemClicked.connect(self.table_item_clicked)
self.scrollarea.setWidget(self.table)
self.scrollarea.setWidgetResizable(1)
output_profile_box = oasysgui.widgetBox(tab_gener, "Surface Generation Parameters", addSpace=True, orientation="vertical", height=370)
gui.comboBox(output_profile_box, self, "center_y", label="Center Profile in the middle of O.E.", labelWidth=300,
items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
gui.separator(output_profile_box)
gui.comboBox(output_profile_box, self, "modify_y", label="Modify Length?", labelWidth=150,
items=["No", "Rescale to new length", "Fit to new length (fill or cut)"], callback=self.set_ModifyY, sendSelectedValue=False, orientation="horizontal")
self.modify_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.modify_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_1 = oasysgui.lineEdit(self.modify_box_2, self, "new_length", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
self.modify_box_3 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_2 = oasysgui.lineEdit(self.modify_box_3, self, "new_length", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.modify_box_3, self, "filler_value", "Filler Value (if new length > profile length) [nm]", labelWidth=300, valueType=float, orientation="horizontal")
self.set_ModifyY()
gui.comboBox(output_profile_box, self, "renormalize_y", label="Renormalize Length Profile to different RMS", labelWidth=300,
items=["No", "Yes"], callback=self.set_RenormalizeY, sendSelectedValue=False, orientation="horizontal")
self.output_profile_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.output_profile_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
gui.comboBox(self.output_profile_box_1, self, "error_type_y", label="Normalization to", labelWidth=270,
items=["Figure Error (nm)", "Slope Error (" + u"\u03BC" + "rad)"],
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(self.output_profile_box_1, self, "rms_y", "Rms Value",
labelWidth=300, valueType=float, orientation="horizontal")
self.set_RenormalizeY()
output_box = oasysgui.widgetBox(tab_gener, "Outputs", addSpace=True, orientation="vertical")
select_file_box = oasysgui.widgetBox(output_box, "", addSpace=True, orientation="horizontal")
self.le_heigth_profile_file_name = oasysgui.lineEdit(select_file_box, self, "heigth_profile_file_name", "Output File Name",
labelWidth=120, valueType=str, orientation="horizontal")
gui.button(select_file_box, self, "...", callback=self.selectFile)
self.shadow_output = oasysgui.textArea(height=400)
out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=500)
out_box.layout().addWidget(self.shadow_output)
gui.rubber(self.controlArea)
self.initializeTabs()
gui.rubber(self.mainArea)
self.overlay_search.raise_()
def resizeEvent(self, event):
self.overlay_search.resize(self.CONTROL_AREA_WIDTH - 15, 290)
event.accept()
def after_change_workspace_units(self):
self.si_to_user_units = 1e2 / self.workspace_units_to_cm
self.horHeaders = ["Entry", "Shape", "Length\n[" + self.workspace_units_label + "]", "Heights St.Dev.\n[nm]", "Slopes St.Dev.\n[" + u"\u03BC" + "rad]"]
self.table.setHorizontalHeaderLabels(self.horHeaders)
self.plot_canvas[0].setGraphXLabel("Y [" + self.workspace_units_label + "]")
self.plot_canvas[1].setGraphXLabel("Y [" + self.workspace_units_label + "]")
self.plot_canvas[5].setGraphXLabel("Y [" + self.workspace_units_label + "]")
label = self.le_dimension_y_from.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_dimension_y_to.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_new_length_1.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_new_length_2.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
def initializeTabs(self):
self.tabs = oasysgui.tabWidget(self.mainArea)
self.tab = [oasysgui.createTabPage(self.tabs, "Info"),
oasysgui.createTabPage(self.tabs, "Heights Profile"),
oasysgui.createTabPage(self.tabs, "Slopes Profile"),
oasysgui.createTabPage(self.tabs, "PSD Heights"),
oasysgui.createTabPage(self.tabs, "CSD Heights"),
oasysgui.createTabPage(self.tabs, "ACF"),
oasysgui.createTabPage(self.tabs, "Generated 1D Profile"),
]
for tab in self.tab:
tab.setFixedHeight(self.IMAGE_HEIGHT)
tab.setFixedWidth(self.IMAGE_WIDTH)
self.plot_canvas = [None, None, None, None, None, None]
self.plot_canvas[0] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[0].setDefaultPlotLines(True)
self.plot_canvas[0].setActiveCurveColor(color='blue')
self.plot_canvas[0].setGraphYLabel("Z [nm]")
self.plot_canvas[0].setGraphTitle("Heights Profile")
self.plot_canvas[0].setInteractiveMode(mode='zoom')
self.plot_canvas[1] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[1].setDefaultPlotLines(True)
self.plot_canvas[1].setActiveCurveColor(color='blue')
self.plot_canvas[1].setGraphYLabel("Zp [$\mu$rad]")
self.plot_canvas[1].setGraphTitle("Slopes Profile")
self.plot_canvas[1].setInteractiveMode(mode='zoom')
self.plot_canvas[2] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[2].setDefaultPlotLines(True)
self.plot_canvas[2].setActiveCurveColor(color='blue')
self.plot_canvas[2].setGraphXLabel("f [m^-1]")
self.plot_canvas[2].setGraphYLabel("PSD [m^3]")
self.plot_canvas[2].setGraphTitle("Power Spectral Density of Heights Profile")
self.plot_canvas[2].setInteractiveMode(mode='zoom')
self.plot_canvas[2].setXAxisLogarithmic(True)
self.plot_canvas[2].setYAxisLogarithmic(True)
self.plot_canvas[3] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[3].setDefaultPlotLines(True)
self.plot_canvas[3].setActiveCurveColor(color='blue')
self.plot_canvas[3].setGraphXLabel("f [m^-1]")
self.plot_canvas[3].setGraphYLabel("CSD [m^3]")
self.plot_canvas[3].setGraphTitle("Cumulative Spectral Density of Heights Profile")
self.plot_canvas[3].setInteractiveMode(mode='zoom')
self.plot_canvas[3].setXAxisLogarithmic(True)
self.plot_canvas[4] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[4].setDefaultPlotLines(True)
self.plot_canvas[4].setActiveCurveColor(color='blue')
self.plot_canvas[4].setGraphXLabel("Length [m]")
self.plot_canvas[4].setGraphYLabel("ACF")
self.plot_canvas[4].setGraphTitle("Autocovariance Function of Heights Profile")
self.plot_canvas[4].setInteractiveMode(mode='zoom')
self.plot_canvas[5] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[5].setDefaultPlotLines(True)
self.plot_canvas[5].setActiveCurveColor(color='blue')
self.plot_canvas[5].setGraphYLabel("Z [nm]")
self.plot_canvas[5].setGraphTitle("Heights Profile")
self.plot_canvas[5].setInteractiveMode(mode='zoom')
self.profileInfo = oasysgui.textArea(height=self.IMAGE_HEIGHT-5, width=400)
profile_box = oasysgui.widgetBox(self.tab[0], "", addSpace=True, orientation="horizontal", height = self.IMAGE_HEIGHT, width=410)
profile_box.layout().addWidget(self.profileInfo)
for index in range(0, 6):
self.tab[index+1].layout().addWidget(self.plot_canvas[index])
self.tabs.setCurrentIndex(1)
def plot_dabam_graph(self, plot_canvas_index, curve_name, x_values, y_values, xtitle, ytitle, title="", color='blue', replace=True):
self.plot_canvas[plot_canvas_index].addCurve(x_values, y_values, curve_name, symbol='', color=color, replace=replace) #'+', '^', ','
self.plot_canvas[plot_canvas_index].setGraphTitle(title)
self.plot_canvas[plot_canvas_index].setGraphXLabel(xtitle)
self.plot_canvas[plot_canvas_index].setGraphYLabel(ytitle)
self.plot_canvas[plot_canvas_index].replot()
def set_ModifyY(self):
self.modify_box_1.setVisible(self.modify_y == 0)
self.modify_box_2.setVisible(self.modify_y == 1)
self.modify_box_3.setVisible(self.modify_y == 2)
def set_RenormalizeY(self):
self.output_profile_box_1.setVisible(self.renormalize_y==1)
def table_item_clicked(self):
if self.table.selectionModel().hasSelection():
if not self.table.rowCount() == 0:
if not self.table.item(0, 0) is None:
row = self.table.selectionModel().selectedRows()[0].row()
self.entry_number = int(self.table.item(row, 0).text())
self.retrieve_profile()
def retrieve_profile(self):
try:
if self.entry_number is None or self.entry_number <= 0:
raise Exception("Entry number should be a strictly positive integer number")
self.server.load(self.entry_number)
self.profileInfo.setText(self.server.info_profiles())
title0 = "Heights Profile. St.Dev.=%.3f nm" % (self.server.stdev_profile_heights() * 1e9)
title1 = "Slopes Profile. St.Dev.=%.3f $\mu$rad" % (self.server.stdev_profile_slopes() * 1e6)
if self.use_undetrended == 0:
self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
1e9 * self.server.zHeights, "Y [" + self.workspace_units_label + "]", "Z [nm]",
title=title0)
self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y, 1e6 * self.server.zSlopes,
"Y [" + self.workspace_units_label + "]", "Zp [$\mu$rad]",
title=title1)
else:
self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
1e9 * self.server.zHeightsUndetrended, "Y [" + self.workspace_units_label + "]", "Z [nm]",
title=title0)
self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y,
1e6 * self.server.zSlopesUndetrended, "Y [" + self.workspace_units_label + "]", "Zp [$\mu$rad]",
title=title1)
y = self.server.f ** (self.server.powerlaw["hgt_pendent"]) * 10 ** self.server.powerlaw["hgt_shift"]
i0 = self.server.powerlaw["index_from"]
i1 = self.server.powerlaw["index_to"]
beta = -self.server.powerlaw["hgt_pendent"]
self.plot_dabam_graph(2, "psd_heights_2", self.server.f, self.server.psdHeights, "f [m^-1]", "PSD [m^3]",
title="Power Spectral Density of Heights Profile (beta=%.2f,Df=%.2f)" % (beta, (5 - beta) / 2))
self.plot_dabam_graph(2, "psd_heights_1", self.server.f, y, "f [m^-1]", "PSD [m^3]", color='green',
replace=False)
self.plot_dabam_graph(2, "psd_heights_3", self.server.f[i0:i1], y[i0:i1], "f [m^-1]", "PSD [m^3]", color='red',
replace=False)
self.plot_dabam_graph(3, "csd", self.server.f, self.server.csd_heights(), "f [m^-1]", "CSD [m^3]")
c1, c2, c3 = dabam.autocorrelationfunction(self.server.y, self.server.zHeights)
self.plot_dabam_graph(4, "acf", c1[0:-1], c2, "Length [m]", "Heights Autocovariance",
title="Autocovariance Function of Heights Profile.\nAutocorrelation Length (ACF=0.5)=%.3f m" % (c3))
# surface error removal
if not self.yy is None and not self.xx is None:
self.xx = None
self.yy = None
self.plot_dabam_graph(5, "heights_profile_generated", numpy.zeros(1), numpy.zeros(1), "Y [" + self.workspace_units_label + "]", "Z [nm]")
if (self.tabs.currentIndex()==6): self.tabs.setCurrentIndex(1)
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
#raise exception
def search_profiles(self):
try:
self.table.itemClicked.disconnect(self.table_item_clicked)
self.table.clear()
row_count = self.table.rowCount()
for n in range(0, row_count):
self.table.removeRow(0)
self.table.setHorizontalHeaderLabels(self.horHeaders)
profiles = dabam.dabam_summary_dictionary(surface=self.get_dabam_shape(),
slp_err_from=self.slope_error_from*1e-6,
slp_err_to=self.slope_error_to*1e-6,
length_from=self.dimension_y_from / self.si_to_user_units,
length_to=self.dimension_y_to / self.si_to_user_units)
for index in range(0, len(profiles)):
self.table.insertRow(0)
for index in range(0, len(profiles)):
table_item = QTableWidgetItem(str(profiles[index]["entry"]))
table_item.setTextAlignment(Qt.AlignCenter)
self.table.setItem(index, 0, table_item)
table_item = QTableWidgetItem(str(profiles[index]["surface"]))
table_item.setTextAlignment(Qt.AlignLeft)
self.table.setItem(index, 1, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["length"]*self.si_to_user_units, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 2, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["hgt_err"]*1e9, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 3, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["slp_err"]*1e6, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 4, table_item)
self.table.setHorizontalHeaderLabels(self.horHeaders)
self.table.resizeRowsToContents()
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.itemClicked.connect(self.table_item_clicked)
self.overlay_search.hide()
except Exception as exception:
self.overlay_search.hide()
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
def get_dabam_shape(self):
if self.shape == 0: return None
elif self.shape == 1: return "plane"
elif self.shape == 2: return "cylindrical"
elif self.shape == 3: return "elliptical"
elif self.shape == 4: return "toroidal"
elif self.shape == 5: return "spherical"
def calculate_heigth_profile_ni(self):
self.calculate_heigth_profile(not_interactive_mode=True)
def calculate_heigth_profile(self, not_interactive_mode=False):
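        # Build the 1D height profile from the loaded DABAM entry: optionally rescale,
        # pad or cut it to a new length, re-center it around zero, renormalize to a
        # target figure- or slope-error RMS, and plot the result in the last tab.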
import matplotlib
print (matplotlib.__version__)
try:
if self.server.y is None: raise Exception("No Profile Selected")
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
self.check_fields()
# PREVENTS CRASH WITH PYQT5
if not not_interactive_mode: self.tabs.setCurrentIndex(6)
if self.modify_y == 2:
profile_1D_y_x_temp = self.si_to_user_units * self.server.y
if self.use_undetrended == 0: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeights
else: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeightsUndetrended
first_coord = profile_1D_y_x_temp[0]
second_coord = profile_1D_y_x_temp[1]
last_coord = profile_1D_y_x_temp[-1]
step = numpy.abs(second_coord - first_coord)
length = numpy.abs(last_coord - first_coord)
n_points_old = len(profile_1D_y_x_temp)
if self.new_length > length:
difference = self.new_length - length
n_added_points = int(difference/step)
if difference % step == 0:
n_added_points += 1
if n_added_points % 2 != 0:
n_added_points += 1
profile_1D_y_x = numpy.arange(n_added_points + n_points_old) * step
profile_1D_y_y = numpy.ones(n_added_points + n_points_old) * self.filler_value * 1e-9 * self.si_to_user_units
profile_1D_y_y[int(n_added_points/2) : n_points_old + int(n_added_points/2)] = profile_1D_y_y_temp
elif self.new_length < length:
difference = length - self.new_length
n_removed_points = int(difference/step)
if difference % step == 0:
n_removed_points -= 1
if n_removed_points % 2 != 0:
n_removed_points -= 1
if n_removed_points >= 2:
profile_1D_y_x = profile_1D_y_x_temp[0 : (n_points_old - n_removed_points)]
profile_1D_y_y = profile_1D_y_y_temp[(int(n_removed_points/2) - 1) : (n_points_old - int(n_removed_points/2) - 1)]
else:
profile_1D_y_x = profile_1D_y_x_temp
profile_1D_y_y = profile_1D_y_y_temp
else:
profile_1D_y_x = profile_1D_y_x_temp
profile_1D_y_y = profile_1D_y_y_temp
else:
if self.modify_y == 0:
profile_1D_y_x = self.si_to_user_units * self.server.y
elif self.modify_y == 1:
scale_factor_y = self.new_length/(self.si_to_user_units * (max(self.server.y)-min(self.server.y)))
profile_1D_y_x = self.si_to_user_units * self.server.y * scale_factor_y
if self.use_undetrended == 0: profile_1D_y_y = self.si_to_user_units * self.server.zHeights
else: profile_1D_y_y = self.si_to_user_units * self.server.zHeightsUndetrended
if self.center_y:
first_coord = profile_1D_y_x[0]
last_coord = profile_1D_y_x[-1]
length = numpy.abs(last_coord - first_coord)
profile_1D_y_x_temp = numpy.linspace(-length/2, length/2, len(profile_1D_y_x))
profile_1D_y_x = profile_1D_y_x_temp
if self.renormalize_y == 0:
rms_y = None
else:
if self.error_type_y == profiles_simulation.FIGURE_ERROR:
rms_y = self.si_to_user_units * self.rms_y * 1e-9 # from nm to user units
profile_1D_y_y = profile_1D_y_y / profile_1D_y_y.std() * rms_y
else:
rms_y = self.rms_y * 1e-6 # from urad to rad
yslopes = numpy.gradient(profile_1D_y_y, profile_1D_y_x[1]-profile_1D_y_x[0])
profile_1D_y_y = profile_1D_y_y / yslopes.std() * rms_y
self.xx = profile_1D_y_x
self.yy = profile_1D_y_y
x_to_plot = self.xx
y_to_plot = self.yy * 1e9 / self.si_to_user_units
ny = len(self.yy)
slope = numpy.zeros(ny)
for i in range(ny-1):
step = self.xx[i+1] - self.xx[i]
slope[i] = numpy.arctan( (self.yy[i+1] - self.yy[i] ) / step )
slope[ny-1] = slope[ny-2]
title = ' Slope error rms : %f $\mu$rad' % (slope.std() * 1e6) + '\n' + \
' Figure error rms : %f nm' % (round(self.yy.std()* 1e9 / self.si_to_user_units, 6))
self.plot_dabam_graph(5, "heights_profile_generated", x_to_plot, y_to_plot, "Y [" + self.workspace_units_label + "]", "Z [nm]", title=title)
if not not_interactive_mode:
QMessageBox.information(self, "QMessageBox.information()",
"Height Profile calculated: if the result is satisfactory,\nclick \'Generate Height Profile File\' to complete the operation ",
QMessageBox.Ok)
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
#raise exception
def generate_heigth_profile_file_ni(self):
self.generate_heigth_profile_file(not_interactive_mode=True)
def generate_heigth_profile_file(self, not_interactive_mode=False):
if not self.yy is None and not self.xx is None:
try:
congruence.checkDir(self.heigth_profile_file_name)
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
numpy.savetxt(self.heigth_profile_file_name, self.yy)
QMessageBox.information(self, "QMessageBox.information()",
"Height Profile file " + self.heigth_profile_file_name + " written on disk",
QMessageBox.Ok)
self.send("PreInput", WiserPreInputData(figure_error_file=self.heigth_profile_file_name,
figure_error_step=numpy.abs(self.xx[1]-self.xx[0]),
figure_user_units_to_m=self.workspace_units_to_m))
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
def call_reset_settings(self):
if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
try:
self.resetSettings()
except:
pass
def check_fields(self):
if self.modify_y == 1 or self.modify_y == 2:
self.new_length = congruence.checkStrictlyPositiveNumber(self.new_length, "New Length")
if self.renormalize_y == 1:
self.rms_y = congruence.checkPositiveNumber(self.rms_y, "Rms Y")
congruence.checkDir(self.heigth_profile_file_name)
def writeStdOut(self, text):
cursor = self.shadow_output.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self.shadow_output.setTextCursor(cursor)
self.shadow_output.ensureCursorVisible()
def selectFile(self):
self.le_heigth_profile_file_name.setText(oasysgui.selectFileFromDialog(self, self.heigth_profile_file_name, "Select Output File", file_extension_filter="Data Files (*.dat)"))
class Overlay(QWidget):
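    # Semi-transparent busy overlay for the search results box: showEvent() starts
    # a timer that animates six spinner dots and runs target_method (the DABAM
    # search) in a background thread; hideEvent() stops the timer again.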
def __init__(self, container_widget=None, target_method=None):
QWidget.__init__(self, container_widget)
self.container_widget = container_widget
self.target_method = target_method
palette = QPalette(self.palette())
palette.setColor(palette.Background, Qt.transparent)
self.setPalette(palette)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(QColor(255, 255, 255, 127)))
painter.setPen(QPen(Qt.NoPen))
for i in range(1, 7):
if self.position_index == i:
painter.setBrush(QBrush(QColor(255, 165, 0)))
else:
painter.setBrush(QBrush(QColor(127, 127, 127)))
painter.drawEllipse(
self.width()/2 + 30 * numpy.cos(2 * numpy.pi * i / 6.0) - 10,
self.height()/2 + 30 * numpy.sin(2 * numpy.pi * i / 6.0) - 10,
20, 20)
time.sleep(0.005)
painter.end()
def showEvent(self, event):
self.timer = self.startTimer(0)
self.counter = 0
self.position_index = 0
t = threading.Thread(target=self.target_method)
t.start()
def hideEvent(self, QHideEvent):
self.killTimer(self.timer)
def timerEvent(self, event):
self.counter += 1
self.position_index += 1
if self.position_index == 7: self.position_index = 1
self.update()
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWdabam_height_profile()
w.si_to_user_units = 100
w.show()
app.exec()
w.saveSettings()
|
__init__.py
|
from threading import Thread
from time import sleep
import __orange__ as API
import GTAOrange.player as Player
import GTAOrange.vehicle as Vehicle
import GTAOrange.blip as Blip
import GTAOrange.text as Text
import GTAOrange.marker as Marker
import GTAOrange.object as Object
def _sendPlayerList(target):
players = Player.getAll()
target.chatMsg("Players:")
for key, player in players.items():
target.chatMsg(player.getName())
def _threadTest():
    print("Sleeping...")
    i = 0
    while i < 10:
        sleep(1)
        print(i)
        i += 1
    print("Woke up!")
def onEventStart(bla, bli):
print(bla)
print(bli)
return True
def onPlayerConnect(player, ip):
print('Player:connect | ' + str(player.getName()) + ' | ' + ip)
player.setPosition(100.0, -1940.0, 21.0)
# own attributes
player.testveh = None
player.testblip = None
# trying player-local events
player.on("leftvehicle", onPlayerLeftVehicle)
return True
def onPlayerDisconnect(player, reason):
print('Player:disconnect | ' + str(player) + ' | ' + str(reason))
def onPlayerCommand(player, command):
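# Chat input arrives as a single string (e.g. "/setpos 100 -1940 21");
# it is split on whitespace so command[0] is the command name and the rest are its arguments.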
# print('Player:command | ' + str(player.getID()) + ' | ' + command)
command = command.split()
# player commands
if(command[0] == "/setpos"):
player.setPosition(float(command[1]), float(
command[2]), float(command[3]))
elif(command[0] == "/players"):
_sendPlayerList(player)
elif(command[0] == "/getpos"):
x, y, z = player.getPosition()
player.chatMsg("{:.9f}".format(x) + "|" +
"{:.9f}".format(y) + "|" + "{:.9f}".format(z))
coords = player.getPosition()
print(coords)
elif(command[0] == "/sethead"):
player.setHeading(float(command[1]))
elif(command[0] == "/gethead"):
player.chatMsg(str(player.getHeading()))
elif(command[0] == "/removeweapons"):
player.removeWeapons()
elif(command[0] == "/giveweapon"):
player.giveWeapon(int(command[1]), int(command[2]))
elif(command[0] == "/giveammo"):
pass
elif(command[0] == "/givemoney"):
player.giveMoney(int(command[1]))
elif(command[0] == "/setmoney"):
player.setMoney(int(command[1]))
elif(command[0] == "/resetmoney"):
player.resetMoney()
elif(command[0] == "/getmoney"):
player.chatMsg(str(player.getMoney()))
elif(command[0] == "/setmodel"):
player.setModel(int(command[1]))
elif(command[0] == "/getmodel"):
player.chatMsg(str(player.getModel()))
elif(command[0] == "/setname"):
player.setName(command[1])
elif(command[0] == "/getname"):
player.chatMsg(player.getName())
elif(command[0] == "/sethealth"):
player.setHealth(float(command[1]))
elif(command[0] == "/gethealth"):
player.chatMsg(str(player.getHealth()))
elif(command[0] == "/setarmour"):
player.setArmour(float(command[1]))
elif(command[0] == "/getarmour"):
player.chatMsg(str(player.getArmour()))
elif(command[0] == "/playerblip"):
player.attachBlip(player.getName() + "'s super special blip", 2)
elif(command[0] == "/nullblip"):
API.CreateBlipForAll("0|0|0", 0.0, 0.0, 70.0, 1.0, 17, 11)
elif(command[0] == "/setcolor"):
val = API.SetPlayerColor(player, int(command[1]))
print(val)
elif(command[0] == "/getcolor"):
val = API.GetPlayerColor(player)
print(val)
elif(command[0] == "/broadcast"):
player.broadcast(command[1], int(command[2]))
elif(command[0] == "/sendmessage"):
player.chatMsg(command[1])
elif(command[0] == "/disablehud"):
if int(command[1]) == 1:
player.disableHUD()
else:
player.enableHUD()
# thread tests
elif command[0] == "/thread":
t = Thread(target=_threadTest)
t.daemon = True
t.start()
# vehicle commands
elif command[0] == "/veh":
if command[1] == "create":
if player.testveh is None:
x, y, z = player.getPosition()
player.testveh = Vehicle.create(
"Burrito", x, y, z, player.getHeading())
player.chatMsg("Created a Burrito! :-) | ID: " +
str(player.testveh.id))
else:
player.chatMsg("Please delete your car before!")
elif command[1] == "delete":
if player.testveh is not None:
player.testveh.delete()
player.testveh = None
else:
player.chatMsg("Please create a car before!")
elif command[1] == "getpos":
if player.testveh is not None:
#x, y, z = player.testveh.getPosition()
#player.chatMsg("{:.9f}".format(x) + "|" + "{:.9f}".format(y) + "|" + "{:.9f}".format(z))
val = player.testveh.getPosition()
print(val)
else:
player.chatMsg("Please create a car before!")
elif command[1] == "setpos":
pass
# blip commands
elif command[0] == "/blip":
if command[1] == "create":
if player.testblip is None:
x, y, z = player.getPosition()
player.testblip = Blip.create("TADAAAA", x, y, 90)
player.chatMsg("Created a fancy blip! :-) | ID: " +
str(player.testblip.id))
else:
player.chatMsg("Please delete your car before!")
elif command[1] == "delete":
if player.testblip is not None:
player.testblip.delete()
player.testblip = None
else:
player.chatMsg("Please create a blip before!")
elif command[1] == "getpos":
if player.testblip is not None:
#x, y, z = player.testveh.getPosition()
#player.chatMsg("{:.9f}".format(x) + "|" + "{:.9f}".format(y) + "|" + "{:.9f}".format(z))
val = player.testblip.getPosition()
print(val)
else:
player.chatMsg("Please create a car before!")
elif command[1] == "setpos":
pass
# 3dtext commands
elif command[0] == "/3dtext":
if command[1] == "create":
x, y, z = player.getPosition()
text = Text.create("Test", x, y, z)
player.chatMsg(
"Created a fancy 3d text! :-) | ID: " + str(text.id))
elif command[1] == "delete":
text = Text.getByID(int(command[2]))
if text is not False:
text.delete()
elif command[1] == "getpos":
text = Text.getByID(int(command[2]))
if text is not False:
val = text.getPosition()
print(val)
elif command[1] == "setpos":
pass
# marker commands
elif command[0] == "/marker":
if command[1] == "create":
x, y, z = player.getPosition()
marker = Marker.create(x, y, z)
marker.on("playerentered", onPlayerEnteredMarker)
player.chatMsg("Created a fancy marker! :-) | ID: " +
str(marker.id))
elif command[1] == "delete":
marker = Marker.getByID(int(command[2]))
if marker is not False:
marker.delete()
elif command[1] == "getpos":
marker = Marker.getByID(int(command[2]))
if marker is not False:
val = marker.getPosition()
print(val)
elif command[1] == "setpos":
pass
# object commands
elif command[0] == "/object":
if command[1] == "create":
x, y, z = player.getPosition()
obj = Object.create(1204839864, x, y, z, 1.0, 1.0, 1.0)
player.chatMsg("Created a fancy object! :-) | ID: " + str(obj.id))
elif command[1] == "delete":
obj = Object.getByID(int(command[2]))
if obj is not False:
obj.delete()
else:
print(' '.join(command))
return True
def onPlayerEnteredVehicle(player, veh):
print('Vehicle:playerentered | ' +
str(player.getID()) + ' | ' + str(veh.getID()))
def onPlayerLeftVehicle(player, veh):
print('Vehicle:playerleft | ' + str(player.getID()) + ' | ' + str(veh.getID()))
def onPlayerEnteredMarker(marker, player):
x, y, z = marker.getPosition()
player.setPosition(x, y, z + 5)
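# Register the global event handlers with the GTAOrange server API.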
Player.on("connect", onPlayerConnect)
Player.on("command", onPlayerCommand)
Vehicle.on("playerentered", onPlayerEnteredVehicle)
#API.TriggerServerEvent("PlayerCommand", [0,"Test"])
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum_mona.base_wizard import BaseWizard
from electrum_mona.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_mona.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum_mona/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum_mona/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum_mona/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum_mona/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<SeedDialogHeader@GridLayout>
text: ''
options_dialog: None
rows: 1
orientation: 'horizontal'
size_hint: 1, None
height: self.minimum_height
BigLabel:
size_hint: 9, None
text: root.text
IconButton:
id: options_button
height: '30dp'
width: '30dp'
size_hint: 1, None
icon: 'atlas://electrum_mona/gui/kivy/theming/light/gear'
on_release:
root.options_dialog() if root.options_dialog else None
<RestoreSeedDialog>
message: ''
word: ''
SeedDialogHeader:
id: seed_dialog_header
text: 'ENTER YOUR SEED PHRASE'
options_dialog: root.options_dialog
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum_mona/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
SeedDialogHeader:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
options_dialog: root.options_dialog
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '48dp'
SeedLabel:
text: root.warning
<ChoiceLineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
SeedLabel:
text: root.message2
TextInput:
id: text_input
multiline: False
size_hint: 1, None
height: '48dp'
''')
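# The kv rules above declare the reusable widgets (WizardTextInput, WizardButton,
# SeedButton, SeedLabel, ...) and the per-dialog layouts that the dialog classes below attach to.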
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
self.auto_dismiss = False
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
# note: everything bound here needs to be unbound as otherwise the
# objects will be kept around and keep receiving the callbacks
Window.bind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
self._trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_keyboard(self, instance, key, keycode, codepoint, modifier):
if key == 27:
if self.wizard.can_go_back():
self.wizard.go_back()
else:
app = App.get_running_app()
if not app.is_exit:
app.is_exit = True
app.show_info(_('Press again to exit'))
else:
self._on_release = False
self.dismiss()
return True
def on_dismiss(self):
Window.unbind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except ValueError:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.title = kwargs.get('message', '')
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
self.init_choices(choices)
def init_choices(self, choices):
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message = kwargs.get('message', '')
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class CLButton(ToggleButton):
def on_release(self):
self.root.script_type = self.script_type
self.root.set_text(self.value)
class ChoiceLineDialog(WizardChoiceDialog):
title = StringProperty('')
message1 = StringProperty('')
message2 = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message1 = kwargs.get('message1', '')
self.message2 = kwargs.get('message2', '')
self.choices = kwargs.get('choices', [])
default_choice_idx = kwargs.get('default_choice_idx', 0)
self.ids.next.disabled = False
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for idx, (script_type, title, text) in enumerate(self.choices):
b = CLButton(text=title, height='30dp', group=self.title, allow_no_selection=False)
b.script_type = script_type
b.root = self
b.value = text
layout.add_widget(b)
if idx == default_choice_idx:
b.trigger_action(duration=0)
def set_text(self, value):
self.ids.text_input.text = value
def get_params(self, b):
return (self.ids.text_input.text, self.script_type)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, _):
self.ext = ext
d = SeedOptionsDialog(self.ext, None, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_mona.mnemonic import Mnemonic
from electrum_mona.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
self.bip39 = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, bip39):
self.ext = ext
self.bip39 = bip39
self.update_next_button()
d = SeedOptionsDialog(self.ext, self.bip39, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def update_next_button(self):
self.ids.next.disabled = False if self.bip39 else not bool(self._test(self.get_text()))
def on_text(self, dt):
self.update_next_button()
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), self.bip39, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def __init__(self, *args, **kwargs):
RestoreSeedDialog.__init__(self, *args, **kwargs)
self.ids.seed_dialog_header.ids.options_button.disabled = True
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except Exception:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/restoring
the wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum_mona/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, aborted=False):
if storage is None and not aborted:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def choice_and_line_dialog(self, **kwargs): ChoiceLineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigner\'s master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
test_consume.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 14:57
from multiprocessing import Process
import time
from funboost import get_consumer, get_publisher, AbstractConsumer
from funboost.consumers.redis_consumer import RedisConsumer
from funboost.utils import LogManager
logger = LogManager('complex_example').get_logger_and_add_handlers()
pb2 = get_publisher('task2_queue', broker_kind=2)
def task1(x, y):
logger.info(f'Consuming this message {x} - {y}, the result is {x - y}')
for i in range(10):
pb2.publish({'n': x * 100 + i})  # While consuming, publish tasks to another queue or to this same queue; consuming and publishing can happen at the same time.
time.sleep(10)  # Simulate work that blocks for 10 seconds; concurrency must be used to get around this blocking.
def task2(n):
logger.info(n)
time.sleep(3)
def multi_processing_consume():
get_consumer('task1_queue', consuming_function=task1, broker_kind=2).start_consuming_message()
RedisConsumer('task2_queue', consuming_function=task2, threads_num=100).start_consuming_message()
AbstractConsumer.join_shedual_task_thread()  # Required on Linux when starting via multiprocessing; otherwise the child processes exit quickly even though the while-1 threads are running, because they are not joined. Not needed on Windows.
if __name__ == '__main__':
[Process(target=multi_processing_consume).start() for _ in range(4)]
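# Not part of the original script: a minimal sketch of how task1_queue could be
# fed, assuming the same get_publisher API already used above for task2_queue.
#
#     pb1 = get_publisher('task1_queue', broker_kind=2)
#     for i in range(100):
#         pb1.publish({'x': i * 10, 'y': i})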
|
process_data_shared_ctypes.py
|
""" process data """
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized
def increment_process_count(process_count: Synchronized):
""" inc process cnt """
with process_count.get_lock():
process_count.value += 1
print(process_count.value)
def run():
process_count: Synchronized = mp.Value('i', 0)
print("start process count: ", process_count.value)
increment_processes = []
for _ in range(8):
the_process = mp.Process(
target=increment_process_count, args=(process_count,))
the_process.start()
increment_processes.append(the_process)
for p in increment_processes:
p.join()
print("end process count: ", process_count.value)
if __name__ == "__main__":
run()
|
conftest.py
|
import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import (
aws,
deployment_openshift_logging as ocp_logging_obj,
ibmcloud,
kms as KMS,
templating,
users,
)
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
skipif_ui,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import create_unique_resource_name
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.couchbase import CouchBase
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ripsaw import RipSaw
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocp_version, skipif_ocs_version, skipif_upgraded_from,
skipif_no_kms or skipif_ui
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if not (teardown or deploy or skip_ocs_deployment):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
skipif_no_kms_marker = item.get_closest_marker("skipif_no_kms")
skipif_ui_marker = item.get_closest_marker("skipif_ui")
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
if skipif_no_kms_marker:
try:
if not is_kms_enabled():
log.info(
f"Test: {item} will be skipped because the OCS cluster"
f" has not configured cluster-wide encryption with KMS"
)
items.remove(item)
except KeyError:
log.warning(
"Cluster is not yet installed. Skipping skipif_no_kms check."
)
if skipif_ui_marker:
skip_condition = skipif_ui_marker
if skipif_ui(skip_condition.args[0]):
log.info(
f"Test: {item} will be skipped due to UI test {skip_condition} is not avalible"
)
items.remove(item)
continue
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
run flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates a new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates a new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
rbd_thick_provision=False,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
are not used, but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
than the default rbd pool.
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface is CephBlockPool
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
rbd_thick_provision=rbd_thick_provision,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates a new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
"""
Delete the project
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a Persistent Volume Claim (PVC) factory. Calling this fixture creates a
new PVC. For a custom PVC, provide the 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}Gi" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates a new Pod.
For a custom Pod, provide the 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
reclaim_policy = (
instance.reclaim_policy if instance.kind == constants.PVC else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
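Example usage (an illustrative sketch; 'project_factory' is assumed to be
requested by the test, as elsewhere in this module):
def test_example(self, project_factory, service_account_factory):
    project = project_factory()
    sa_obj = service_account_factory(project=project)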
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): Name of an existing service account to be reused
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
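Example usage (an illustrative sketch; the size value is an assumption):
def test_example(self, dc_pod_factory):
    dc_pod = dc_pod_factory(interface=constants.CEPHBLOCKPOOL, size=10)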
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (bool): True for creating a pod based on a raw block PVC, False otherwise
sa_obj (OCS): Existing service account object to be used, in case a specific one is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if not (teardown or skip_ocs_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
if config.DEPLOYMENT["skip_download_client"]:
log.info("Skipping client download")
else:
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
else:
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
ibmcloud.login()
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
deployment_test = "deployment" in request.node.items[0].location[0]
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
Print the cluster utilization metrics every 15 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
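Example usage (an illustrative sketch; the access modes and distribution
ratio are assumptions):
def test_example(self, multi_pvc_factory):
    pvc_objs = multi_pvc_factory(
        interface=constants.CEPHFILESYSTEM,
        access_modes=['ReadWriteOnce', 'ReadWriteMany'],
        access_mode_dist_ratio=[2, 1],
        num_of_pvc=3,
    )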
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
Values are 'select_random', 'distribute_random' and 'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, 1st and
6th PVC might have same access mode.
'distribute_sequential' :The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will be as sets of PVCs of the same access mode. For example,
first set of 10 will be having same access mode followed by
next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start a memory leak capture thread which will be executed in parallel with the test run
Memory leak data will be captured in all worker nodes for ceph-osd process
Data will be appended in /tmp/(worker)-top-output.txt file for each worker
During teardown created tmp files will be deleted
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if line.__contains__("ceph-osd"):
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
# Force-skipping in case of IBM Cloud -
# https://github.com/red-hat-storage/ocs-ci/issues/3863
if config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM:
pytest.skip(
"RGW deployments were found, but test will be skipped because of BZ1926831"
)
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Deletes all objects that were created as part of the test
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list; the test should append the full paths of uploaded objects to it
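Example usage (an illustrative sketch; the object path is a hypothetical value):
def test_example(self, mcg_obj, awscli_pod, uploaded_objects):
    uploaded_objects.append("s3://my-bucket/my-object")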
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
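Example usage (an illustrative sketch; the amount and interface values are
assumptions):
def test_example(self, bucket_factory):
    buckets = bucket_factory(amount=2, interface='OC')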
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
*args,
**kwargs,
):
"""
Creates the requested amount of buckets. All buckets created by this factory are deleted during teardown.
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if bucketclass:
interface = bucketclass["interface"]
current_call_created_buckets = []
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for _ in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
*args,
**kwargs,
)
current_call_created_buckets.append(created_bucket)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health()
return current_call_created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns dictionary with storageclasses. Keys represent reclaim policy of
storageclass. There are two storageclasses for each key. First is RBD based
and the second one is CephFS based. Storageclasses with Retain Reclaim
Policy are created from default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
logging_channel = "stable" if ocp_version >= "4.7" else ocp_version
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = logging_channel
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = logging_channel
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
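Example usage (an illustrative sketch; the pgbench values are assumptions):
def test_example(self, pgsql_factory_fixture):
    pgsql = pgsql_factory_fixture(
        replicas=1, clients=4, transactions=600, timeout=900
    )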
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for pgbench pods to initialize and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
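Example usage (an illustrative sketch; the project and build counts are
assumptions):
def test_example(self, jenkins_factory_fixture):
    jenkins = jenkins_factory_fixture(num_projects=2, num_of_builds=3)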
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for builds to reach the 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def couchbase_factory_fixture(request):
"""
Couchbase factory fixture
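Example usage (an illustrative sketch; the replica count is an assumption):
def test_example(self, couchbase_factory_fixture):
    couchbase = couchbase_factory_fixture(replicas=3, run_in_bg=False)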
"""
couchbase = CouchBase()
def factory(
replicas=3,
run_in_bg=False,
skip_analyze=True,
sc_name=None,
num_items=None,
num_threads=None,
):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Run IOs in background as option
skip_analyze (bool): Skip logs analysis as option
"""
# Setup couchbase
couchbase.setup_cb()
# Create couchbase workers
couchbase.create_couchbase_worker(replicas=replicas, sc_name=sc_name)
# Run couchbase workload
couchbase.run_workload(
replicas=replicas,
run_in_bg=run_in_bg,
num_items=num_items,
num_threads=num_threads,
)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
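Example usage (an illustrative sketch; the storage class constant is an
assumption, not taken from this module):
def test_example(self, amq_factory_fixture):
    amq, threads = amq_factory_fixture(sc_name=constants.DEFAULT_STORAGECLASS_RBD)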
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of the storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Number of seconds required to send the messages
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement-related results should be
stored. If 'measurement_dir' is provided by config then use it,
otherwise a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
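Example usage (an illustrative sketch; the sizes and pool type are assumptions):
def test_example(self, project_factory, multi_dc_pod):
    project = project_factory()
    dc_pods = multi_dc_pod(
        num_of_pvcs=2, pvc_size=10, project=project, pool_type="cephfs"
    )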
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode in "RWX-BLK" and pool_type in "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of the
test case, print the difference: which new alerts were raised while the
test ran.
"""
teardown = config.RUN["cli_params"].get("teardown")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown:
return
elif dev_mode:
log.info("Skipping alert check for development mode")
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture creates the ceph toolbox pod for manually created deployments,
if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
ocp_dedicated = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (ocp_dedicated and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up again
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by restarting the nodes
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
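Example usage (an illustrative sketch):
def test_example(self, mcg_connection_factory):
    connection_name = mcg_connection_factory(platform=constants.AWS_PLATFORM)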
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
be generated. New name will be used only if there is not
existing connection for given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
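Example usage (an illustrative sketch):
def test_example(self, ns_resource_factory):
    target_bucket_name, ns_resource = ns_resource_factory(
        platform=constants.AWS_PLATFORM
    )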
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent from VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
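# Hedged usage sketch (not part of the original suite): exercising snapshot_factory from a
# hypothetical test. A pvc_factory fixture is assumed to exist elsewhere in this conftest;
# names and arguments are illustrative only.
def _example_snapshot_usage(pvc_factory, snapshot_factory):
    pvc_obj = pvc_factory()
    # Create a snapshot and wait for it to become ready; deletion is handled by the finalizer.
    snap_obj = snapshot_factory(pvc_obj, wait=True)
    assert snap_obj.name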
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
list: List of OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates a new PVC from the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of the PVC being created, e.g. 5Gi. Ideally this
should be the same as the restore size of the snapshot; the
parameter exists to allow negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
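# Hedged sketch (illustrative only): chaining snapshot_factory and snapshot_restore_factory
# to restore a PVC from a snapshot. Keyword arguments mirror the factory signature above;
# a pvc_factory fixture is assumed to exist elsewhere in this conftest.
def _example_snapshot_restore_usage(pvc_factory, snapshot_factory, snapshot_restore_factory):
    pvc_obj = pvc_factory()
    snap_obj = snapshot_factory(pvc_obj)
    # With defaults, size, storageclass and the restore yaml are derived from the snapshot.
    restored_pvc = snapshot_restore_factory(
        snapshot_obj=snap_obj,
        access_mode=constants.ACCESS_MODE_RWO,
        status=constants.STATUS_BOUND,
    )
    assert restored_pvc.snapshot is snap_obj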
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
Snapshot restore factory. Calling this fixture creates a new PVC from each
VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which
have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of the PVC being created, e.g. 5Gi. Ideally this
should be the same as the restore size of the snapshot; the
parameter exists to allow negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: List of restored PVC objects
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
restored_pvc.snapshot = snap_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects OCS logs after tier execution, which allows inspecting
the cluster's status after the run regardless of the execution outcome.
"""
def finalizer():
"""
Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# Prior to OCS 4.6 the endpoint count was configured directly on the noobaa CR.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
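# Hedged sketch (illustrative only): the endpoint-count merge patches above are built with
# escaped f-string braces; an equivalent payload can be assembled with json.dumps, which can
# be easier to read. This combines both bounds in a single patch and is an alternative
# construction, not the fixture's own code.
def _build_endpoint_patch(min_count, max_count, pre_4_6=False):
    """Return a JSON merge-patch string for the given NooBaa endpoint bounds."""
    import json
    endpoints = {"minCount": min_count, "maxCount": max_count}
    if pre_4_6:
        # Before OCS 4.6 the patch is applied directly on the noobaa CR.
        spec = {"endpoints": endpoints}
    else:
        # From OCS 4.6 onwards the patch goes on the storagecluster CR.
        spec = {"multiCloudGateway": {"endpoints": endpoints}}
    return json.dumps({"spec": spec})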
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be the same as the size of the parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
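# Hedged usage sketch (illustrative only): cloning a PVC with pvc_clone_factory. Size and
# volume mode default to those of the parent PVC, as documented above; a pvc_factory fixture
# is assumed to exist elsewhere in this conftest.
def _example_pvc_clone_usage(pvc_factory, pvc_clone_factory):
    parent_pvc = pvc_factory()
    cloned_pvc = pvc_clone_factory(pvc_obj=parent_pvc, status=constants.STATUS_BOUND)
    assert cloned_pvc.parent is parent_pvc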
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if hasattr(request.node.config, "py_test_service"):
rp_service = request.node.config.py_test_service
if not hasattr(rp_service.RP, "rp_client"):
request.config._metadata[
"RP Launch URL:"
] = "Problem with RP, launch URL is not available!"
return
launch_id = rp_service.RP.rp_client.launch_id
project = rp_service.RP.rp_client.project
endpoint = rp_service.RP.rp_client.endpoint
launch_url = f"{endpoint}/ui/#{project}/launches/all/{launch_id}/{launch_id}"
config.REPORTING["rp_launch_url"] = launch_url
config.REPORTING["rp_launch_id"] = launch_id
config.REPORTING["rp_endpoint"] = endpoint
config.REPORTING["rp_project"] = project
request.config._metadata["RP Launch URL:"] = launch_url
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates a clone of each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be the same as the size of the parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: List of PVC instances
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots and clones of the Postgres PVCs
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod to the cloned PVCs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod to the restored PVCs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
Create an in-cluster elastic-search deployment for benchmark-operator tests,
using the name 'es' as a shortcut for elastic-search for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="function")
def setup_ui(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load the cluster_info.json file if it exists (on clusters
installed via Flexy) and applies the information to the config object (for
example, settings related to a disconnected cluster).
"""
load_cluster_info()
@pytest.fixture(scope="function")
def ripsaw(request):
# Create benchmark Operator (formerly ripsaw)
ripsaw = RipSaw()
def teardown():
ripsaw.cleanup()
time.sleep(10)
request.addfinalizer(teardown)
return ripsaw
|
stats_server.py
|
#!/usr/bin/env python
"""Stats server implementation."""
import BaseHTTPServer
import collections
import json
import socket
import threading
import logging
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
def _JSONMetricValue(metric_info, value):
if metric_info.metric_type == stats.MetricType.EVENT:
return dict(
sum=value.sum,
counter=value.count,
bins_heights=collections.OrderedDict(value.bins_heights))
else:
return value
def BuildVarzJsonString():
"""Builds Varz JSON string from all stats metrics."""
results = {}
for name, metric_info in stats.STATS.GetAllMetricsMetadata().iteritems():
info_dict = dict(metric_type=metric_info.metric_type.name)
if metric_info.value_type:
info_dict["value_type"] = metric_info.value_type.name
if metric_info.docstring:
info_dict["docstring"] = metric_info.docstring
if metric_info.units:
info_dict["units"] = metric_info.units.name
if metric_info.fields_defs:
info_dict["fields_defs"] = []
for field_def in metric_info.fields_defs:
info_dict["fields_defs"].append(
(field_def.field_name, utils.SmartStr(field_def.field_type)))
value = {}
all_fields = stats.STATS.GetMetricFields(name)
for f in all_fields:
joined_fields = ":".join(utils.SmartStr(fname) for fname in f)
value[joined_fields] = _JSONMetricValue(
metric_info, stats.STATS.GetMetricValue(
name, fields=f))
else:
value = _JSONMetricValue(metric_info, stats.STATS.GetMetricValue(name))
results[name] = dict(info=info_dict, value=value)
encoder = json.JSONEncoder()
return encoder.encode(results)
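# Hedged sketch (illustrative only): the shape of one entry in the JSON built above. Keys
# follow results[name] = dict(info=..., value=...); the metric name, type names and numbers
# here are invented for illustration.
_EXAMPLE_VARZ_ENTRY = {
    "frontend_request_count": {
        "info": {"metric_type": "COUNTER", "value_type": "INT"},
        # For EVENT metrics the value is instead a dict with "sum", "counter" and
        # "bins_heights" keys (see _JSONMetricValue above).
        "value": 42,
    }
}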
class StatsServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Default stats server implementation."""
def do_GET(self): # pylint: disable=g-bad-name
if self.path == "/varz":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(BuildVarzJsonString())
else:
self.send_error(403, "Access forbidden: %s" % self.path)
class StatsServer(object):
def __init__(self, port):
self.port = port
def Start(self):
"""Start HTTPServer."""
# Use the same number of available ports as the adminui is using. If we
# have 10 available for adminui we will need 10 for the stats server.
adminui_max_port = config_lib.CONFIG.Get("AdminUI.port_max",
config_lib.CONFIG["AdminUI.port"])
additional_ports = adminui_max_port - config_lib.CONFIG["AdminUI.port"]
max_port = self.port + additional_ports
for port in range(self.port, max_port + 1):
# Make a simple reference implementation WSGI server
try:
server = BaseHTTPServer.HTTPServer(("", port), StatsServerHandler)
break
except socket.error as e:
if e.errno == socket.errno.EADDRINUSE and port < max_port:
logging.info("Port %s in use, trying %s", port, port + 1)
else:
raise
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
class StatsServerInit(registry.InitHook):
"""Starts up a varz server after everything is registered."""
def RunOnce(self):
"""Main method of this registry hook.
StatsServer implementation may be overridden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one.
"""
# Figure out which port to use.
port = config_lib.CONFIG["Monitoring.http_port"]
if port != 0:
logging.info("Starting monitoring server on port %d.", port)
# pylint: disable=g-import-not-at-top
from grr.lib import local as local_overrides
# pylint: enable=g-import-not-at-top
if "stats_server" in dir(local_overrides):
stats_server = local_overrides.stats_server.StatsServer(port)
logging.debug("Using local StatsServer from %s", local_overrides)
else:
stats_server = StatsServer(port)
stats_server.Start()
else:
logging.info("Monitoring server disabled.")
|
__init__.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import warnings
from threading import Thread
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
# from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb')  # text-file, TensorBoard
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt)
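# Hedged sketch (illustrative only): how on_fit_epoch_end above formats a results.csv header
# and row. The column layout mirrors self.keys; keys, vals and epoch are caller-supplied and
# invented here (len(keys) is assumed to equal len(vals)).
def _example_results_csv_row(keys, vals, epoch):
    n = len(vals) + 1  # one 'epoch' column plus one column per logged value
    header = ('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n'
    row = ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n'
    return header, row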
|
services.py
|
#
# Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
import sys
import socket
import logging
import asyncio
import binascii
import ipaddress
import platform
import queue
from threading import Thread
from struct import pack, unpack
from ..common.sfc_globals import sfc_globals
from ..nsh.common import VXLANGPE, BASEHEADER, CONTEXTHEADER, ETHHEADER, TRACEREQHEADER, NSH_NEXT_PROTO_IPV4
from ..nsh.common import PAYLOAD_START_INDEX_NSH_TYPE1, NSH_NEXT_PROTO_ETH
from ..nsh.common import PAYLOAD_START_INDEX_NSH_TYPE3, IPV4_HEADER_LEN_BYTES
# The trace-report offsets below are referenced in BasicService.process_trace_pkt; they are
# assumed to live in ..nsh.common alongside the other NSH constants.
from ..nsh.common import NSH_OAM_TRACE_DEST_IP_REPORT_OFFSET, NSH_OAM_TRACE_DEST_IP_REPORT_LEN
from ..nsh import decode as nsh_decode
from ..nsh.encode import add_sf_to_trace_pkt
from ..nsh.service_index import process_service_index
from service_instance.function.image_processing import *
from service_instance.service_host import ServiceHost
# from service_instance.performance_statistic import *
import matplotlib as mpl
mpl.rcParams['toolbar'] = 'None'
__author__ = "Jim Guichard, Reinaldo Penno"
__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
__version__ = "0.3"
__email__ = "jguichar@cisco.com, rapenno@gmail.com"
__status__ = "beta"
"""
All supported services
"""
logger = logging.getLogger(__name__)
#: Global flags used for indication of current packet processing status
# Packet needs more processing within this SFF
PACKET_CHAIN = 0b00000000
# Packet was sent to another SFF or service function
PACKET_CONSUMED = 0b00000001
# Packet will be dropped
PACKET_ERROR = 0b00000010
# Referenced service function is invalid
SERVICE_HOP_INVALID = 0xDEADBEEF
#: Services names
FWL = 'firewall'
NAT = 'napt44'
DPI = 'dpi'
QOS = 'qos'
IDS = 'ids'
SF = 'sf'
SFF = 'sff'
CUDP = 'cudp'
# HISTOGRAM = 'histogram'
RELIABLE = 'reliable'
# Service for display
IMAGE_CROP = 'image_crop'
GRAY_SCALE = 'gray_scale'
FACE_DETECT = 'face_detect'
EYE_DETECT = 'eye_detect'
# For VxLAN-gpe
GPE_NP_NSH = 0x4
ETH_P_NSH_0 = 0x89
ETH_P_NSH_1 = 0x4f
def find_service(service_type):
"""Service dispatcher - get service class based on its type
:param service_type: service type
:type service_type: str
:return `:class:Baseservice`
"""
if service_type == FWL:
return MyFwService
elif service_type == NAT:
return MyNatService
elif service_type == DPI:
return MyDpiService
elif service_type == SFF:
return MySffServer
elif service_type == CUDP:
return ControlUdpServer
elif service_type == QOS or service_type == IDS:
# return a generic service for currently unimplemented services
return MyService
elif service_type == IMAGE_CROP:
return MyImageCropService
elif service_type == GRAY_SCALE:
return MyGrayScaleService
elif service_type == FACE_DETECT:
return MyFaceDetectService
elif service_type == EYE_DETECT:
return MyEyeDetectService
else:
raise ValueError('Service "%s" not supported' % service_type)
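# Hedged usage sketch (illustrative only): the dispatcher above maps a service-type string to
# a service class, which callers then instantiate with the asyncio event loop. The variable
# names are examples only.
def _example_find_service_usage():
    loop = asyncio.get_event_loop()
    service_cls = find_service(FWL)       # -> MyFwService
    firewall_service = service_cls(loop)  # BasicService subclass; starts its queue-reader thread
    return firewall_service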
class BasicService(object):
def __init__(self, loop):
"""
Service Blueprint Class
:param loop:
:type loop: `:class:asyncio.unix_events._UnixSelectorEventLoop`
"""
self.loop = loop
self.transport = None
self.server_vxlan_values = VXLANGPE()
self.server_eth_before_nsh_values = ETHHEADER()
self.is_eth_nsh = False
self.server_base_values = BASEHEADER()
self.server_ctx_values = CONTEXTHEADER()
self.server_eth_values = ETHHEADER()
self.server_trace_values = TRACEREQHEADER()
# MUST be set by EACH descendant class
self.service_type = None
self.service_name = None
self.packet_queue = queue.Queue()
self.sending_thread = Thread(target=self.read_queue)
self.sending_thread.daemon = True
self.sending_thread.start()
def set_name(self, name):
self.service_name = name
def get_name(self):
"""
:return service name which is the same as SF/SFF name
:rtype: str
"""
return self.service_name
def _decode_headers(self, data):
"""
Procedure for decoding packet headers.
Decode the incoming packet for debug purposes and to strip out various
header values.
"""
offset = 0
# decode vxlan-gpe header
nsh_decode.decode_vxlan(data, offset, self.server_vxlan_values)
offset += 8
# decode ETH header before NSH if exists
if self.server_vxlan_values.next_protocol == GPE_NP_NSH:
nsh_decode.decode_ethheader(data, offset, self.server_eth_before_nsh_values)
if ((self.server_eth_before_nsh_values.ethertype0 == ETH_P_NSH_0) and
(self.server_eth_before_nsh_values.ethertype1 == ETH_P_NSH_1)):
self.is_eth_nsh = True
offset += 14
else:
self.is_eth_nsh = False
# decode NSH base header
nsh_decode.decode_baseheader(data, offset, self.server_base_values)
offset += 8
# decode NSH context headers
nsh_decode.decode_contextheader(data, offset, self.server_ctx_values)
offset += 16
# decode NSH eth headers
nsh_decode.decode_ethheader(data, offset, self.server_eth_values)
# decode common trace header
if self.is_eth_nsh:
offset = 8 + 14
else:
offset = 8
if nsh_decode.is_trace_message(data, offset):
offset += 24
nsh_decode.decode_trace_req(data, offset, self.server_trace_values)
def _process_incoming_packet(self, data, addr):
"""
Decode NSH headers and process service index
:param data: packet payload
:type data: bytes
:param addr: IP address and port to which data are passed
:type addr: tuple
"""
logger.debug('%s: Processing received packet(basicservice) service name :%s',
self.service_type, self.service_name)
self._decode_headers(data)
if self.is_eth_nsh:
offset = 8 + 14
else:
offset = 8
rw_data = bytearray(data)
rw_data, _ = process_service_index(rw_data, offset, self.server_base_values)
sfc_globals.sf_processed_packets += 1
return rw_data
def _update_metadata(self, data,
network_platform=None, network_shared=None,
service_platform=None, service_shared=None):
"""
Update NSH context header in received packet data
:param data: packet data
:type data: bytes
:param network_platform: new network_platform value
:type network_platform: int
:param network_shared: new network_shared value
:type network_shared: int
:param service_platform: new service_platform value
:type service_platform: int
:param service_shared: new service_shared value
:type service_shared: int
:return bytearray
"""
if network_platform is not None:
self.server_ctx_values.network_platform = network_platform
if network_shared is not None:
self.server_ctx_values.network_shared = network_shared
if service_platform is not None:
self.server_ctx_values.service_platform = service_platform
if service_shared is not None:
self.server_ctx_values.service_shared = service_shared
new_ctx_header = pack('!I I I I',
self.server_ctx_values.network_platform,
self.server_ctx_values.network_shared,
self.server_ctx_values.service_platform,
self.server_ctx_values.service_shared)
data = bytearray(data)
data[16:32] = new_ctx_header
return data
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
"""
Put received packet into the internal queue
:param data: packet data
:type data: bytes
:param addr: IP address and port to which data are passed
:type addr: tuple
"""
# logger.info('[%s] service received packet from %s:', self.service_type, addr)
# logger.info('[%s] the data in packet is: %s', self.service_type, data[60:].decode('utf-8'))
packet = (data, addr)
try:
self.packet_queue.put_nowait(packet)
except Exception:
msg = 'Putting into queue failed'
# logger.info(msg)
logger.exception(msg)
if self.service_type == DPI:
sfc_globals.sf_queued_packets += 1
else:
sfc_globals.sff_queued_packets += 1
def process_datagram(self, data, addr):
"""
Forward received packet accordingly based on its type
:param data: packet data
:type data: bytes
:param addr: IP address and port to which data are passed
:type addr: tuple
"""
logger.info('[%s] service received packet from %s:', self.service_type, addr)
logger.info('[%s] the data in packet is: %s', self.service_type, data[60:].decode('utf-8'))
rw_data = self._process_incoming_packet(data, addr)
# TODO: following the handling here, rw_data can be modified to change the packet contents in the service chain
# rw_data += "[processed by {}]".format(self.service_type).encode('utf-8')
if self.is_eth_nsh:
offset = 8 + 14
else:
offset = 8
if nsh_decode.is_data_message(data, offset):
# Must send it to UDP port of VxLAN-gpe
# if nsh_decode.is_vxlan_nsh_legacy_message(data, 0):
# Disregard source port of received packet and send packet back to 6633
addr_l = list(addr)
# TODO: bug fix - when sending back to the SFF this should be the SFF's port; normally it should not be hard-coded but obtained from the SFC information in ODL
addr_l[1] = 4789
addr = tuple(addr_l)
self.transport.sendto(rw_data, addr)
# logger.info('[%s] sending packets to %s', self.service_type, addr)
elif nsh_decode.is_trace_message(data, offset):
# Add SF information to packet
if self.server_base_values.service_index == self.server_trace_values.sil:
trace_pkt = add_sf_to_trace_pkt(rw_data, self.service_type, self.service_name)
self.transport.sendto(trace_pkt, addr)
# Send packet back to SFF
else:
self.transport.sendto(rw_data, addr)
def read_queue(self):
"""
Read received packet from the internal queue
"""
try:
while True:
packet = self.packet_queue.get(block=True)
self.process_datagram(data=packet[0], addr=packet[1])
self.packet_queue.task_done()
except Exception:
msg = 'Reading from queue failed'
logger.info(msg)
logger.exception(msg)
raise
def process_trace_pkt(self, rw_data, data):
logger.info('%s: Sending trace report packet', self.service_type)
ipv6_addr = ipaddress.IPv6Address(data[
NSH_OAM_TRACE_DEST_IP_REPORT_OFFSET:NSH_OAM_TRACE_DEST_IP_REPORT_OFFSET + NSH_OAM_TRACE_DEST_IP_REPORT_LEN]) # noqa
if ipv6_addr.ipv4_mapped:
ipv4_str_trace_dest_addr = str(ipaddress.IPv4Address(self.server_trace_values.ip_4))
trace_dest_addr = (ipv4_str_trace_dest_addr, self.server_trace_values.port)
logger.info("IPv4 destination:port address for trace reply: %s", trace_dest_addr)
self.transport.sendto(rw_data, trace_dest_addr)
else:
ipv6_str_trace_dest_addr = str(ipaddress.IPv6Address(ipv6_addr))
trace_dest_addr = (ipv6_str_trace_dest_addr, self.server_trace_values.port)
logger.info("IPv6 destination address for trace reply: %s", trace_dest_addr)
self.transport.sendto(rw_data, trace_dest_addr)
@staticmethod
def connection_refused(exc):
logger.error('Connection refused: %s', exc)
def connection_lost(self, exc):
logger.warning('Closing transport: %s', exc)
loop = asyncio.get_event_loop()
loop.stop()
@staticmethod
def error_received(exc):
logger.error('Error received: %s', exc)
class MyService(BasicService):
def __init__(self, loop):
super(MyService, self).__init__(loop)
self.service_type = 'generic'
class MyFwService(BasicService):
def __init__(self, loop):
super(MyFwService, self).__init__(loop)
self.service_type = FWL
class MyNatService(BasicService):
def __init__(self, loop):
super(MyNatService, self).__init__(loop)
self.service_type = NAT
class MyDpiService(BasicService):
def __init__(self, loop):
super(MyDpiService, self).__init__(loop)
self.service_type = DPI
class ControlUdpServer(BasicService):
def __init__(self, loop):
"""
This control server class listens on a socket for commands from the main
process. For example, if an SFF is deleted the main program can send a
command to this data plane thread to exit.
"""
# super(ControlUdpServer, self).__init__(loop)
self.loop = loop
self.transport = None
self.service_name = None
self.service_type = 'Control UDP Server'
def datagram_received(self, data, addr):
logger.info('%s received a packet from: %s', self.service_type, addr)
self.loop.call_soon_threadsafe(self.loop.stop)
# data = data.decode('utf-8')
# print(data_plane_path)
# sfp_topo = json.loads(data)
# print(sfp_topo)
# print(sfp_topo['3']['3'])
def connection_lost(self, exc):
logger.error('stop: %s', exc)
class MySffServer(BasicService):
def __init__(self, loop):
"""
This is the main SFF server. It receives VXLAN GPE packets, calls
packet processing function and finally sends them on their way
"""
super(MySffServer, self).__init__(loop)
self.service_type = 'SFF Server'
def _lookup_next_sf(self, service_path, service_index):
"""
Retrieve next SF locator info from SfcGlobals
:param service_path: service path identifier
:type service_path: int
:param service_index: service index
:type service_index: int
:return dict or hex
:rtype: tuple
"""
next_hop = SERVICE_HOP_INVALID
# First we determine the list of SFs in the received packet based on
# SPI value extracted from packet
try:
local_data_plane_path = sfc_globals.get_data_plane_path()
sff_name = super(MySffServer, self).get_name()
next_hop = local_data_plane_path[sff_name][service_path][service_index]
except KeyError:
logger.info('Could not determine next service hop. SP: %d, SI: %d',
service_path, service_index)
pass
return next_hop
@staticmethod
def _get_packet_bearing(packet):
"""
Parse a packet to get source and destination info
CREDITS: http://www.binarytides.com/python-packet-sniffer-code-linux/
:param packet: received packet (IP header and upper layers)
:type packet: bytes
:return dict or None
"""
ip_header = packet[:20]
iph = unpack('!BBHHHBBH4s4s', ip_header)
protocol = iph[6]
s_addr = socket.inet_ntoa(iph[8])
d_addr = socket.inet_ntoa(iph[9])
if protocol == 6:
tcp_header = packet[20:40]
protocolh = unpack('!HHLLBBHHH', tcp_header)
elif protocol == 17:
udp_header = packet[20:28]
protocolh = unpack('!HHHH', udp_header)
else:
logger.error('Only TCP and UDP protocols are supported')
return
s_port = protocolh[0]
d_port = protocolh[1]
return {'s_addr': s_addr,
's_port': s_port,
'd_addr': d_addr,
'd_port': d_port}
def _process_incoming_packet(self, data, addr):
"""
SFF main packet processing function
:param data: UDP payload
:type data: bytes
:param addr: IP address and port to which data are passed
:type addr: tuple (str, int)
"""
# logger.debug("%s: mysff Processing packet from: %s", self.service_type, addr)
address = ()
rw_data = bytearray(data)
self._decode_headers(data)
logger.debug("%s: Packet dump: %s", self.service_type, binascii.hexlify(rw_data))
sfc_globals.sff_processed_packets += 1
# logger.info('*******(mysff server) received packets "%d"', sfc_globals.received_packets)
# Lookup what to do with the packet based on Service Path Identifier
next_hop = self._lookup_next_sf(self.server_base_values.service_path,
self.server_base_values.service_index)
if self.is_eth_nsh:
offset = 8 + 14
else:
offset = 8
if nsh_decode.is_data_message(data, offset):
# send the packet to the next SFF based on address
if next_hop != SERVICE_HOP_INVALID:
address = next_hop['ip'], next_hop['port']
# logger.info("%s: Sending packets to: %s", self.service_type, address)
self.transport.sendto(rw_data, address)
# send packet to its original destination
elif self.server_base_values.service_index:
# logger.info("%s: End of path", self.service_type)
logger.debug("%s: Packet dump: %s", self.service_type, binascii.hexlify(rw_data))
# logger.debug('%s: service index end up as: %d', self.service_type,
# self.server_base_values.service_index)
# Remove all SFC headers, leave only original packet
if self.server_base_values.next_protocol == NSH_NEXT_PROTO_IPV4:
payload_start_index = PAYLOAD_START_INDEX_NSH_TYPE1
elif self.server_base_values.next_protocol == NSH_NEXT_PROTO_ETH:
payload_start_index = PAYLOAD_START_INDEX_NSH_TYPE3
else:
logger.error("\nCan not determine NSH next protocol\n")
return rw_data, address
inner_packet = rw_data[payload_start_index:]
if inner_packet:
# euid = os.geteuid()
# if euid != 0:
# print("Script not started as root. Running sudo...")
# args = ['sudo', sys.executable] + sys.argv + [os.environ]
# # the next line replaces the currently-running process with the sudo
# os.execlpe('sudo', *args)
# Reinaldo note:
# Unfortunately it has to be this way. Python has poor raw socket support in
# MacOS. What happens is that MacoS will _always_ include the IP header unless you use
# socket option IP_HDRINCL
# https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man4/ip.4.html
#
# But if you try to set this option at the Python level (instead of C level) it does not
# work. the only way around is to create a raw socket of type UDP and leave the IP header
# out when sending/building the packet.
sock_raw = None
bearing = self._get_packet_bearing(inner_packet)
try:
if platform.system() == "Darwin":
# Assuming IPv4 packet for now. Move pointer forward
inner_packet = rw_data[payload_start_index + IPV4_HEADER_LEN_BYTES:]
sock_raw = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_UDP)
else:
# sock_raw = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
sock_raw = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as msg:
logger.error("[SFF] Socket could not be created. Error Code : %s", msg)
# sys.exit()
logger.info("[SFF] End of Chain. Sending packet to %s %s", bearing['d_addr'], bearing['d_port'])
try:
sock_raw.sendto(inner_packet[:], (bearing['d_addr'],
int(bearing['d_port'])))
except AttributeError as e:
logger.error("[SFF] Stop sending package because of error. Parameters: %s %s %s",
socket.AF_INET,
socket.SOCK_RAW,
platform.system())
except BrokenPipeError as e:
logger.error("[SFF] Stop sending package because of Broken pipe. IP: %s Port: %s",
bearing['d_addr'],
int(bearing['d_port']))
# end processing as Service Index reaches zero (SI = 0)
else:
logger.error("%s: Loop Detected", self.service_type)
logger.debug("%s: Packet dump: %s", self.service_type, binascii.hexlify(rw_data))
rw_data.__init__()
data = ""
elif nsh_decode.is_trace_message(data, offset):
# Have to differentiate between no SPID and End of path
service_index = self.server_base_values.service_index
if (self.server_trace_values.sil == service_index) or (next_hop == SERVICE_HOP_INVALID):
# End of trace
super(MySffServer, self).process_trace_pkt(rw_data, data)
else:
# Trace will continue
address = next_hop['ip'], next_hop['port']
# logger.info("%s: Sending trace packet to: %s", self.service_type, address)
# send the packet to the next SFF based on address
self.transport.sendto(rw_data, address)
# logger.info("%s: Listening for NSH packets ...", self.service_type)
# logger.info('%s: Finished processing packet from: %s', self.service_type, addr)
return rw_data, address
def process_datagram(self, data, addr):
"""
Process received packet
:param data: packet data
:type data: bytes
:param addr: IP address and port to which data are passed
:type addr: tuple
"""
# logger.info('%s: Received a packet from: %s', self.service_type, addr)
self._process_incoming_packet(data, addr)
def connection_lost(self, exc):
logger.error('stop: %s', exc)
@staticmethod
def error_received(exc):
logger.error('Error received: %s', exc)
class MyReliableConnectionService(BasicService):
"""
A service for testing reliable connection in SFC.
"""
def __init__(self, loop):
super(MyReliableConnectionService, self).__init__(loop)
self.service_type = RELIABLE
self.host = ServiceHost()
self.address = None
self.header = None
def process_datagram(self, data, addr):
self.address = addr
self.header = data[:60]
self.store_and_process(data, addr)
def send(self, data):
logger.info("[%s] sending data: %s", self.service_type, data)
super(MyReliableConnectionService, self).process_datagram(self.header + data, self.address)
def store_and_process(self, data, addr):
data = data[60:]
serial = self.host.get_serial(data)
end = self.host.buffer(flow_id=serial,
recv_buffer=data,
ack_address=str(addr[0]))
if end:
logger.info("[%s] Receive full message with serial %s!!!", self.service_type, serial)
flow_data = self.host.fetch(serial, serial)
# do something with the data
flow_data = self.process_data(flow_data)
send_thread = Thread(target=self.host.sendto, args=(flow_data, self.send))
send_thread.start()
send_thread.join()
else:
logger.info("[%s] Receive part message with serial %s", self.service_type, serial)
def process_data(self, data):
return data
class MyImageCropService(MyReliableConnectionService):
def __init__(self, loop):
super(MyImageCropService, self).__init__(loop)
self.service_type = IMAGE_CROP
def process_data(self, data):
image = decode_base64_image(data)
size = np.shape(image)
logger.debug("[%s] receive image with size: %s", self.service_type, size)
x = size[0]
y = size[1]
image = image[int(0.1 * x):int(0.9 * x), int(0.1 * y):int(0.9 * y)]
# cv2.imshow("Image Crop", image)
# cv2.waitKey(1)
fig = plt.figure("Image Crop", clear=True, figsize=(1.6 * 5, 0.9 * 5))
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.ion()
plt.show()
plt.imshow(image, cmap="gray", interpolation='none')
plt.draw()
plt.pause(0.001)
return encode_image_to_base64(image)
class MyGrayScaleService(MyReliableConnectionService):
def __init__(self, loop):
super(MyGrayScaleService, self).__init__(loop)
self.service_type = GRAY_SCALE
def process_data(self, data):
image = decode_base64_image(data)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
logger.debug("[gray scale] Receive image size: %s", np.shape(image))
cv2.putText(gray, "Processed By [Gray Scale] Service", (0, 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
fig = plt.figure("Gray_Scale", clear=True, figsize=(1.6 * 3, 0.9 * 3))
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.ion()
plt.show()
plt.imshow(gray, cmap="gray", interpolation='none')
plt.draw()
plt.pause(0.001)
return encode_image_to_base64(gray)
class MyDetectService(MyReliableConnectionService):
def __init__(self, loop):
super(MyDetectService, self).__init__(loop)
self.service_type = "detect"
self.logger = logging.getLogger("Detect_Service")
log_file = logging.FileHandler("detect_service.log", "w")
# formatter = logging.Formatter(
# '[%(asctime)s] - [logger name :%(name)s] - [%(filename)s file line:%(lineno)d] - %(levelname)s: %(message)s')
formatter = logging.Formatter(
'[%(asctime)s] [Object Detect Result (x, y, w, h)]: %(message)s')
log_file.setFormatter(formatter)
self.logger.addHandler(log_file)
self.logger.setLevel(logging.DEBUG)
def process_data(self, data):
raise NotImplementedError
def detect(self, data, config_file):
from service_instance.function.haar_cascade_object_detection import haar_cascade_detect
current_image = decode_base64_image(data)
faces = haar_cascade_detect(current_image, config_file)
for (x, y, w, h) in faces:
current_image = cv2.rectangle(current_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
self.logger.info("%s", faces)
cv2.putText(current_image, "Processed By [Object Detect] Service", (0, 45), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
# cv2.imshow("Object Detect", current_image)
# cv2.waitKey(1)
return encode_image_to_base64(current_image)
class MyFaceDetectService(MyDetectService):
def process_data(self, data):
config_file = "service_instance/function/.haarcascade/haarcascade_frontalface_default.xml"
logger.debug("[Face Detect] Receive image with size: %s", np.shape(decode_base64_image(data)))
return self.detect(data, config_file)
class MyEyeDetectService(MyDetectService):
def process_data(self, data):
config_file = "service_instance/function/.haarcascade/haarcascade_eye_tree_eyeglasses.xml"
logger.debug("[Eye Detect] Receive image with size: %s", np.shape(decode_base64_image(data)))
return self.detect(data, config_file)
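# Hedged sketch (illustrative only): building a minimal IPv4+UDP packet and parsing it with
# the same struct formats used by MySffServer._get_packet_bearing above. Addresses and ports
# are invented for the example.
def _example_packet_bearing():
    src, dst = '10.0.0.1', '10.0.0.2'
    sport, dport = 12345, 4789
    # 20-byte IPv4 header (protocol 17 = UDP) followed by an 8-byte UDP header.
    ip_header = pack('!BBHHHBBH4s4s', 0x45, 0, 28, 0, 0, 64, 17, 0,
                     socket.inet_aton(src), socket.inet_aton(dst))
    udp_header = pack('!HHHH', sport, dport, 8, 0)
    bearing = MySffServer._get_packet_bearing(ip_header + udp_header)
    assert bearing == {'s_addr': src, 's_port': sport, 'd_addr': dst, 'd_port': dport}
    return bearing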
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections.abc import Sequence, Mapping
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import Any, Dict, Iterable, Iterator, Optional, Union, List, Tuple, Type, TypeVar, Callable
from unittest.mock import MagicMock
import numpy as np
import expecttest
from torch.testing._comparison import (
assert_equal as assert_equal,
Pair,
TensorLikePair,
BooleanPair,
NumberPair,
UnsupportedInputs,
NonePair,
ErrorMeta,
)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.xnnpack
from enum import Enum
from statistics import mean
import functools
from .composite_compliance import no_dispatch
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.nn import ModuleList, ModuleDict, Sequential, ParameterList, ParameterDict
from torch._C import ScriptList, ScriptDict # type: ignore[attr-defined]
from torch.onnx import (register_custom_op_symbolic,
unregister_custom_op_symbolic)
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
IS_CI = bool(os.getenv('CI'))
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
NATIVE_DEVICES = ('cpu', 'cuda', 'meta')
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
In detail, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
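# Illustrative sketch (not part of the original API): a derived _TestParametrizer only
# needs to implement _parametrize_test() as a generator of (test, name_suffix, param_kwargs)
# tuples. The subclass and parameter names below are hypothetical and exist purely to show
# the expected shape of that protocol:
#
#   class _SeedParametrizer(_TestParametrizer):
#       def _parametrize_test(self, test, generic_cls, device_cls):
#           for seed in (0, 1234):
#               yield (test, str(seed), {'seed': seed})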
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
    by the given parametrize_fns. Each given parametrize_fn should have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
'decorators is not supported. For test "{}", the following parameters '
'are handled multiple times: {}'.format(
test.__name__, redundant_params))
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
yield (new_test, merged_test_name, full_param_kwargs)
return composite_fn
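# Illustrative sketch of the composition described above (hypothetical test, not part of
# the original file): stacking two @parametrize decorators composes their parametrize_fns,
# and the generated suffixes are built top to bottom, producing names such as
# test_foo_x_0_y_False and test_foo_x_1_y_True:
#
#   @parametrize("x", range(2))
#   @parametrize("y", [False, True])
#   def test_foo(self, x, y):
#       ...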
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
You can also use it as a class decorator. E.g.
```
@instantiate_parametrized_tests
class TestFoo(TestCase):
...
```
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
return generic_cls
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
       responsibility of naming these test functions. The default test names consist of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
        arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names: List[str] = [s.strip() for s in arg_str.split(',')]
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
test_name = self._get_subtest_name(values, explicit_name=maybe_name)
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "")
parser = argparse.ArgumentParser(add_help=not is_running_via_run_test)
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
    # The following snippet is adapted from the Python 3 standard library's subprocess.call,
    # with the following changes:
    #   1. An `except KeyboardInterrupt` block is added for SIGINT handling.
    #   2. In Python 2, subprocess.Popen doesn't return a context manager, so we call
    #      `p.wait()` in a `finally` block to keep the code portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
return succeed and lint_test_case_extension(test_case_or_suite)
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
other_args = []
if IMPORT_DISABLED_TESTS:
other_args.append('--import-disabled-tests')
if IMPORT_SLOW_TESTS:
other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
from xmlrunner.result import _XMLTestResult # type: ignore[import]
class XMLTestResultVerbose(_XMLTestResult):
"""
Adding verbosity to test outputs:
by default test summary prints 'skip',
but we want to also print the skip reason.
GH issue: https://github.com/pytorch/pytorch/issues/69014
This works with unittest_xml_reporting<=3.2.0,>=2.0.0
(3.2.0 is latest at the moment)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def addSkip(self, test, reason):
super().addSkip(test, reason)
for c in self.callback.__closure__:
if isinstance(c.cell_contents, str) and c.cell_contents == 'skip':
# this message is printed in test summary;
# it stands for `verbose_str` captured in the closure
c.cell_contents = f"skip: {reason}"
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(
output=test_report_path,
verbosity=2 if verbose else 1,
resultclass=XMLTestResultVerbose))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
IS_X86 = platform.machine() in ('x86_64', 'i386')
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
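# Illustrative usage sketch (not part of the original file): both branches above expose
# the same context manager interface, which yields a file *name* rather than an open
# file object:
#
#   with TemporaryFileName() as fname:
#       torch.save(torch.ones(2), fname)
#       loaded = torch.load(fname)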
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_CUDA = torch.cuda.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests are enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Enables crossref tests, in addition to standard tests which
# are being run. crossref tests work by installing a torch
# function mode that runs extra compute alongside the regular
# computation that happens with the test. After both computations
# are done, we cross-reference them (thus the name) to check for
# correctness, before throwing out the extra compute and proceeding
# as we had before. By default, we don't run these tests.
TEST_WITH_CROSSREF = os.getenv('PYTORCH_TEST_WITH_CROSSREF', '0') == '1'
def skipIfCrossRef(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_CROSSREF:
raise unittest.SkipTest("test doesn't currently with crossref")
else:
fn(*args, **kwargs)
return wrapper
class CrossRefMode(torch.overrides.TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
r = func(*args, **kwargs)
return r
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# If this is True then CUDA memory leak checks are skipped. If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like
# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type.
# Especially when checking against a reference we can't be sure which variant we get, so we simply try both.
def numpy_to_torch_dtype(np_dtype):
try:
return numpy_to_torch_dtype_dict[np_dtype]
except KeyError:
return numpy_to_torch_dtype_dict[np_dtype.type]
def has_corresponding_torch_dtype(np_dtype):
try:
numpy_to_torch_dtype(np_dtype)
return True
except KeyError:
return False
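# Illustrative sketch (not part of the original file) of the dtype duality handled above:
#
#   numpy_to_torch_dtype(np.float64)           # direct dict hit             -> torch.float64
#   numpy_to_torch_dtype(np.dtype("float64"))  # falls back to np_dtype.type -> torch.float64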
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
torch_to_numpy_dtype_dict.update({
torch.bfloat16: np.float32,
torch.complex32: np.complex64
})
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
def skipIfMps(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if torch.backends.mps.is_available():
raise unittest.SkipTest("test doesn't currently work with MPS")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
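# Illustrative usage sketch (not part of the original file); the version is given as a
# tuple/list of ints and the values below are arbitrary:
#
#   @skipIfRocmVersionLessThan((4, 3))
#   def test_something_rocm(self):
#       ...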
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
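# Illustrative usage sketch (not part of the original file): force deterministic
# algorithms for a block of test code, restoring the previous setting on exit:
#
#   with DeterministicGuard(True, warn_only=False):
#       ...  # ops run here must pick deterministic implementations (or raise)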
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
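# Illustrative usage sketch (not part of the original file), assuming the "warn" debug
# mode accepted by torch.cuda.set_sync_debug_mode:
#
#   with CudaSyncGuard("warn"):
#       ...  # implicit CUDA synchronizations inside this block emit warnings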
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoXNNPACK(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch.backends.xnnpack.enabled:
raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is in the caffe2.python.core
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def _decide_skip_caffe2(expect_caffe2, reason):
def skip_dec(func):
@wraps(func)
def wrapper(self):
if torch.onnx._CAFFE2_ATEN_FALLBACK != expect_caffe2:
raise unittest.SkipTest(reason)
return func(self)
return wrapper
return skip_dec
skipIfCaffe2 = _decide_skip_caffe2(False, "Not compatible with Caffe2")
skipIfNoCaffe2 = _decide_skip_caffe2(True, "Caffe2 is not available")
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_memory_leak_check', True):  # if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_non_default_stream', True):  # if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with no_dispatch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
with no_dispatch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
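# Illustrative sketch (not part of the original file): because the RNG state is saved on
# entry and restored on exit, the two draws below are identical:
#
#   with freeze_rng_state():
#       a = torch.rand(3)
#   with freeze_rng_state():
#       b = torch.rand(3)
#   # torch.equal(a, b) is True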
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
    except TypeError:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
        # NOTE: driver API discrepancies alone are ignored because with the jiterator
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = ("CUDA driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if TEST_WITH_ROCM:
warnings.warn(msg)
else:
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
    print('Failed to import hypothesis in common_utils, tests are not derandomized')
# Used in check_if_enable to see if a test method should be disabled by an issue,
# sanitizes a test method name by stripping suffixes appended by @dtypes parametrization.
# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should
# disable ALL parametrized test_bitwise_ops tests, such as test_bitwise_ops_cuda_int32
def remove_device_and_dtype_suffixes(test_name: str) -> str:
# import statement is localized to avoid circular dependency issues with common_device_type.py
from torch.testing._internal.common_device_type import get_device_type_test_bases
device_suffixes = [x.device_type for x in get_device_type_test_bases()]
dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()]
test_name_chunks = test_name.split("_")
if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes:
if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes:
return "_".join(test_name_chunks[0:-2])
return "_".join(test_name_chunks[0:-1])
return test_name
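# Illustrative sketch (not part of the original file) of the sanitization above:
#
#   remove_device_and_dtype_suffixes("test_bitwise_ops_cuda_int32")  # -> "test_bitwise_ops"
#   remove_device_and_dtype_suffixes("test_bitwise_ops_int32")       # -> "test_bitwise_ops"
#   remove_device_and_dtype_suffixes("test_bitwise_ops")             # -> "test_bitwise_ops"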
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
raw_test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and raw_test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
sanitized_test_method_name = remove_device_and_dtype_suffixes(test._testMethodName)
if not IS_SANDCASTLE and disabled_tests_dict is not None:
for disabled_test, (issue_url, platforms) in disabled_tests_dict.items():
disable_test_parts = disabled_test.split()
if len(disable_test_parts) > 1:
disabled_test_name = disable_test_parts[0]
disabled_test_suite = disable_test_parts[1][1:-1]
# if test method name or its sanitized version exactly matches the disabled test method name
# AND allow non-parametrized suite names to disable parametrized ones (TestSuite disables TestSuiteCPU)
if (test._testMethodName == disabled_test_name or sanitized_test_method_name == disabled_test_name) \
and disabled_test_suite in test_suite:
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \
"If you're seeing this on your local machine and would like to enable this test, " \
"please make sure CI is not set and you are not using the flag --import-disabled-tests."
raise unittest.SkipTest(skip_msg)
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# `TestCase.assertEqual` is very permissive and coerces the inputs into a format that can be compared. This is very
# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of
# `torch.testing._comparison.assert_equal`, used for example by the public testing function
# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence
# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only
# change the supported inputs, but the comparison logic is the same.
# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation.
class RelaxedBooleanPair(BooleanPair):
"""Pair for boolean-like inputs.
In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single
element tensor-like.
"""
_supported_number_types = NumberPair(0, 0)._supported_types
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a boolean; the other can be a boolean, a
        # number, or a single element tensor or array, whereas the default BooleanPair requires both inputs to be booleans.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_bool(input, id=id) for input in (actual, expected)]
def _to_bool(self, bool_like, *, id):
if isinstance(bool_like, np.number):
return bool(bool_like.item())
elif type(bool_like) in self._supported_number_types:
return bool(bool_like)
elif isinstance(bool_like, (torch.Tensor, np.ndarray)):
numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a boolean. "
f"Got {numel} elements instead.",
id=id,
)
return bool(bool_like.item())
else:
return super()._to_bool(bool_like, id=id)
class RelaxedNumberPair(NumberPair):
"""Pair for number-like inputs.
In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element
tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when
``check_dtype=True`` is passed.
In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also
supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and
``@toleranceOverride`` decorators.
"""
_TYPE_TO_DTYPE = {
int: torch.int64,
float: torch.float32,
complex: torch.complex64,
}
def __init__(
self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters
) -> None:
super().__init__(actual, expected, check_dtype=False, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a number; the other can be a number or a single
        # element tensor or array, whereas the default NumberPair requires both inputs to be numbers.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_number(input, id=id) for input in (actual, expected)]
def _to_number(self, number_like, *, id):
if isinstance(number_like, (torch.Tensor, np.ndarray)):
numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a number. "
f"Got {numel} elements instead.",
id=id,
)
number = number_like.item()
if isinstance(number, bool):
number = int(number)
return number
elif isinstance(number_like, Enum):
return int(number_like) # type: ignore[call-overload]
else:
return super()._to_number(number_like, id=id)
class TensorOrArrayPair(TensorLikePair):
"""Pair for tensor-like inputs.
On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of
    :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like that can be converted into a
    tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard for their
relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine.
In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride``
and ``@toleranceOverride`` decorators.
"""
def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters):
super().__init__(actual, expected, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id, allow_subclasses):
self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray))
actual, expected = [self._to_tensor(input) for input in (actual, expected)]
for tensor in (actual, expected):
self._check_supported(tensor, id=id)
return actual, expected
class UnittestPair(Pair):
"""Fallback ABC pair that handles non-numeric inputs.
To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in
order to use it with the :class:`Pair` "framework" from :func:`assert_equal`.
Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support.
"""
CLS: Union[Type, Tuple[Type, ...]]
TYPE_NAME: Optional[str] = None
def __init__(self, actual, expected, **other_parameters):
self._check_inputs_isinstance(actual, expected, cls=self.CLS)
super().__init__(actual, expected, **other_parameters)
def compare(self):
test_case = unittest.TestCase()
try:
return test_case.assertEqual(self.actual, self.expected)
except test_case.failureException as error:
msg = str(error)
type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__
raise self._make_error_meta(AssertionError, f"{type_name.title()} comparison failed: {msg}")
class StringPair(UnittestPair):
CLS = string_classes
TYPE_NAME = "string"
class SetPair(UnittestPair):
CLS = set
class TypePair(UnittestPair):
CLS = type
class ObjectPair(UnittestPair):
CLS = object
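# Illustrative sketch (not part of the original file): these pairs are intended to be
# passed to torch.testing._comparison.assert_equal via its `pair_types` argument (an
# assumption about that private API), roughly the way TestCase.assertEqual wires them up:
#
#   assert_equal(
#       actual, expected,
#       pair_types=(NonePair, RelaxedBooleanPair, RelaxedNumberPair,
#                   TensorOrArrayPair, StringPair, SetPair, TypePair, ObjectPair),
#   )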
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, so it seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# CUDA device side error will cause subsequence test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr)
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
    # A policy is a zero-argument function that returns a context manager.
    # We don't take the context manager directly, as it may be necessary to
    # construct a fresh one for each test method.
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
        # NOTE: Python exceptions (e.g., unittest.SkipTest) keep objects in scope
        #       alive, so this cannot be done in setUp and tearDown because
        #       tearDown is run unconditionally no matter whether the test
        #       passes or not. For the same reason, we can't wrap the `method`
        #       call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True, num_red=0, num_green=0):
using_unittest = isinstance(result, unittest.TestResult)
if num_runs_left == 0:
if num_green > 0 and num_red > 0 and using_unittest:
result.addSkip(self, f'{{"flaky": {True}, "num_red": {num_red}, "num_green": {num_green},' +
f'"max_num_retries": {MAX_NUM_RETRIES}}}')
return
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
if result.wasSuccessful():
case = TestCase()
if TEST_SAVE_XML is not None:
                    # This is a bit hacky: XMLRunner changes the expected type from TestCase to TestInfo.
                    # Create a dummy TestInfo to record results correctly.
from xmlrunner.result import _TestInfo # type: ignore[import]
case = _TestInfo(result, case)
case.output = _TestInfo.ERROR
case.elapsed_time = 0.0
case.test_description = "TestSuiteEarlyFailure"
                # This shouldn't really happen, but if it does, add a fake failure
# For more details see https://github.com/pytorch/pytorch/issues/71973
result.failures.append((case, "TestSuite execution was aborted early"))
assert result.wasSuccessful() is False
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
_, traceback_str = result.failures.pop(-1)
print(traceback_str)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red + 1, num_green=num_green)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
_, traceback_str = result.errors.pop(-1)
print(traceback_str)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red + 1, num_green=num_green)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red, num_green=num_green + 1)
elif not report_only and num_retries_left < MAX_NUM_RETRIES:
# in this case, our test was rerun (as a retry has been used) and it just passed.
# we incur one more recursive call with num_runs_left = 0 to allow for accurate flaky reporting
self._run_with_retry(result=result, num_runs_left=0, report_only=report_only,
num_red=num_red, num_green=num_green + 1)
def run(self, result=None):
with contextlib.ExitStack() as stack:
if TEST_WITH_CROSSREF:
stack.enter_context(torch.overrides.push_torch_function_mode(CrossRefMode))
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(
result=result,
num_runs_left=num_runs,
report_only=not OVERRIDE_FLAKY_SIGNAL,
num_red=0,
num_green=0)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
        where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
        should be minimal preference for rows with any particular number
        of elements. To achieve this, the sampling method is built on a
        sawtooth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
        which contains every possible per-row column count an equal number of times.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols)
def sawteeth(n, m):
            # Return the total number of counts in the sequence of
            # sawteeth, where n and m define a window in the
            # (n_rows + 1, n_cols + 1) rectangle in which the sequence
            # of sawteeth fits perfectly.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
                # math.isqrt(x) is available starting from Python 3.8.
                # Here we use int(math.sqrt(x)) as an approximation
                # that appears to give the exact result for all x values
                # less than 2**35, at least; the exact upper limit of x
                # is TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
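    # Editor's illustrative sketch (not from the original source): for a small
    # deterministic case the helper above yields a monotone crow_indices vector
    # with n_rows + 1 entries, starting at 0 and ending at nnz, e.g.
    #
    #     crow = TestCase._make_crow_indices(3, 4, 6, device='cpu',
    #                                        dtype=torch.int64, random=False)
    #     # len(crow) == 4, crow[0] == 0, crow[-1] == 6, and every per-row
    #     # count crow[i + 1] - crow[i] is at most n_cols == 4.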
def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=()):
from operator import mul
from functools import reduce
sparse_dim = 2
assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments'
assert len(size) >= sparse_dim
if blocksize:
assert len(blocksize) == 2, (size, blocksize)
assert size[-2] % blocksize[0] == 0, (size, blocksize)
assert size[-1] % blocksize[1] == 0, (size, blocksize)
blocksize0, blocksize1 = blocksize
else:
blocksize0 = blocksize1 = 1
def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz):
compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype)
plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_compressed_dims):
count = compressed_indices[i + 1] - compressed_indices[i]
plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort(
torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor((nnz,) + blocksize, device=device, dtype=dtype, low=low, high=high)
return values, compressed_indices, plain_indices
batch_shape = size[:-2]
n_batch = reduce(mul, batch_shape, 1)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_compressed_dims, n_plain_dims = size[-2] // blocksize0, size[-1] // blocksize1
else:
n_compressed_dims, n_plain_dims = size[-1] // blocksize1, size[-2] // blocksize0
blocknnz = nnz // (blocksize0 * blocksize1)
sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)]
sparse_tensors_it = map(list, zip(*sparse_tensors))
values = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize)
compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
return torch.sparse_compressed_tensor(compressed_indices, plain_indices,
values, size=size, dtype=dtype, layout=layout, device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=())
def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=())
def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize)
def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
        # Assert that we are not given an impossible combination, where the sparse
        # dims have empty numel but nnz > 0 would require the indices to hold values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
i1 = i[:, :(nnz // 2), ...]
i2 = i[:, :((nnz + 1) // 2), ...]
i = torch.cat([i1, i2], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
# coalesce is only implemented for COO
if t.layout == torch.sparse_coo:
t = t.coalesce()
return t.to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
numpy_sample = sample_input.numpy()
n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False, **kwargs)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
        # If you see this function being used, the test is written incorrectly
        # and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def assertEqual(
self,
x,
y,
msg: Optional[Union[str, Callable[[str], str]]] = None,
*,
atol: Optional[float] = None,
rtol: Optional[float] = None,
equal_nan=True,
exact_dtype=True,
# TODO: default this to True
exact_device=False,
exact_layout=False,
exact_stride=False,
exact_is_coalesced=False
):
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
# numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall
# back to an elementwise comparison. Note that this has to happen here and not for example in
# `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform
# multiple comparisons.
if any(
isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y)
):
def to_list(input):
return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input)
x = to_list(x)
y = to_list(y)
# When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here.
# Otherwise, the pair origination of `assert_equal` will fail, because the sequence is recognized as container
# that should be checked elementwise while the tensor is not.
elif isinstance(x, torch.Tensor) and isinstance(y, Sequence):
y = torch.as_tensor(y, dtype=x.dtype, device=x.device)
elif isinstance(x, Sequence) and isinstance(y, torch.Tensor):
x = torch.as_tensor(x, dtype=y.dtype, device=y.device)
assert_equal(
x,
y,
pair_types=(
NonePair,
RelaxedBooleanPair,
RelaxedNumberPair,
TensorOrArrayPair,
StringPair,
SetPair,
TypePair,
ObjectPair,
),
sequence_types=(
Sequence,
torch.storage._TypedStorage,
Sequential,
ModuleList,
ParameterList,
ScriptList,
torch.utils.data.dataset.Subset,
),
mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict),
rtol=rtol,
rtol_override=self.rel_tol,
atol=atol,
atol_override=self.precision,
equal_nan=equal_nan,
check_device=exact_device,
check_dtype=exact_dtype,
check_layout=exact_layout,
check_stride=exact_stride,
check_is_coalesced=exact_is_coalesced,
# This emulates unittest.TestCase's behavior if a custom message passed and
# TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage)
# is True (default)
msg=(lambda generated_msg: f"{generated_msg} : {msg}") if isinstance(msg, str) and self.longMessage else msg,
)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES: # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
        # Don't put this in the try block; if exc_type is AssertionError (or a
        # base class of it), the except clause above would swallow this failure.
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
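    # Editor's illustrative sketch (not from the original source): typical use
    # inside a test method; note that the regex is matched with re.match, i.e.
    # anchored at the start of the warning message.
    #
    #     with self.assertWarnsOnceRegex(UserWarning, "deprecated"):
    #         warnings.warn("deprecated: use the new API instead", UserWarning)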
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertAtenOp(self, onnx_model, operator, overload_name=""):
all_aten_nodes = [p for p in onnx_model.graph.node
if p.op_type == "ATen" and p.domain == "org.pytorch.aten"]
self.assertTrue(all_aten_nodes)
for op in all_aten_nodes:
attrs = {attr.name: attr.s.decode() for attr in op.attribute}
if attrs.get("operator") == operator:
break
self.assertEqual(attrs["operator"], operator)
self.assertEqual(attrs.get("overload_name", ""), overload_name)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove CI flag since this is a wrapped test process.
# CI flag should be set in the parent process only.
if "CI" in env.keys():
del env["CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator, as there is a
    potential race condition where the allocated port may become unavailable
    before it can be used.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
# NOTE: connect_errors must be a tuple of strings; a bare parenthesized string
# would be iterated character by character.
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
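# Editor's illustrative sketch (not from the original source): mirrors how
# distributed tests typically pair this decorator with find_free_port. c10d
# raises a RuntimeError containing "Address already in use" when the freshly
# found port is lost to a race, which is exactly what triggers a retry. The
# TCPStore arguments below are assumptions for illustration only.
def _example_retry_tcp_store():
    from torch.distributed import TCPStore

    @retry_on_connect_failures
    def create_store():
        port = find_free_port()
        # world_size=1, is_master=True: this process owns the store.
        return TCPStore("localhost", port, 1, True)

    return create_store()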
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
return f_retry # true decorator
return deco_retry
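# Editor's illustrative sketch (not from the original source): retry() wrapping
# a network fetch that can fail transiently; with skip_after_retries=True the
# test is skipped instead of erroring once all retries are exhausted.
def _example_retry_decorator():
    from urllib.error import URLError
    from urllib.request import urlopen

    @retry(URLError, tries=3, delay=1, skip_after_retries=True)
    def fetch(url):
        return urlopen(url, timeout=15).read()

    return fetch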
# FIXME: modernize these to be consistent with make_tensor
# and review including them in torch.testing
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
zero_dim = t.ndim == 0
if zero_dim:
t = t.unsqueeze(0)
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
# Choose a "weird" value that won't be accessed
if t.dtype.is_floating_point or t.dtype.is_complex:
value = math.nan
elif t.dtype == torch.bool:
value = True
else:
value = 12
if zero_dim:
result[0] = value
result.set_(result.storage(), 1, (), ())
else:
result[..., 1::2] = value
strides = list(result.stride())
strides[-1] *= 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
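# Editor's illustrative sketch (not from the original source): the returned
# tensor exposes the same shape and values as its input but with doubled
# strides, so the original data is interleaved in storage with unused "weird"
# values (nan for floating point dtypes) that kernels must skip over.
def _example_noncontiguous_like():
    t = torch.arange(6.).reshape(2, 3)
    nc = noncontiguous_like(t)
    assert not nc.is_contiguous()
    assert torch.equal(nc, t)
    return nc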
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
# We choose the singular values to be "around one"
# This is to make the matrix well conditioned
# s = [2, 3, ..., k+1]
s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
# s = [2, -3, 4, ..., (-1)^k k+1]
s[1::2] *= -1.
# 1 + 1/s so that the singular values are in the range [2/3, 3/2]
# This gives a condition number of 9/4, which should be good enough
s.reciprocal_().add_(1.)
# Note that the singular values need not be ordered in an SVD so
        # we don't need to sort S
x = (u * s.to(u.dtype)) @ vh
x.requires_grad_(requires_grad)
return x
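# Editor's illustrative sketch (not from the original source): the construction
# above keeps all singular values in [2/3, 3/2], so the result is full rank and
# well conditioned.
def _example_fullrank_matrix():
    a = make_fullrank_matrices_with_distinct_singular_values(
        4, 4, device='cpu', dtype=torch.double)
    assert torch.linalg.matrix_rank(a).item() == 4
    return a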
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
    The density of the result approaches the given density as the size
    of the matrix increases, provided the requested density is relatively
    small but higher than min(rows, columns)/(rows * columns), which keeps
    the matrix non-singular.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
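# Editor's illustrative sketch (not from the original source): the rotations
# above preserve the spectrum arange(1, n + 1) / n, so the densified result is
# positive definite and a Cholesky factorization succeeds.
def _example_random_sparse_pd_matrix():
    a = random_sparse_pd_matrix(5, density=0.5, dtype=torch.double).to_dense()
    torch.linalg.cholesky(a)  # would raise if `a` were not positive definite
    return a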
# FIXME: remove this by updating test suites using it
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
# FIXME: remove this by updating test suites using it
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# FIXME: improve load_tests() documentation here
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
# FIXME: document this and move it to test_serialization
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0") == "1":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
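# Editor's illustrative sketch (not from the original source): analytical
# gradients of a double-precision input are checked against numerical ones;
# batched-grad checks and fast mode are enabled by default via the wrapper.
def _example_gradcheck_usage():
    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    assert gradcheck(torch.sin, (x,))
    return x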
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0") == "1":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# FIXME: delete this
# Using @toleranceOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
# FIXME: move to test_sparse or sparse utils
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
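# Editor's illustrative sketch (not from the original source): a test method
# decorated with coalescedonoff receives a `coalesced` keyword argument and is
# exercised once per sparse COO representation.
def _example_coalescedonoff_usage():
    class _SparseSumTest(TestCase):
        @coalescedonoff
        def test_sum(self, coalesced):
            i = torch.tensor([[0, 1], [0, 1]])
            v = torch.tensor([1., 2.])
            t = torch.sparse_coo_tensor(i, v, (2, 2))
            if coalesced:
                t = t.coalesce()
            self.assertEqual(t.to_dense().sum(), torch.tensor(3.))
    return _SparseSumTest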
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
    # Return the shared library file in the installed folder if it exists,
    # otherwise the file in the build folder.
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
    Similar to unittest.skip; however, in the sandcastle environment it just
    "passes" the test instead, to avoid creating tasks that complain about
    tests skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
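# Editor's illustrative sketch (not from the original source): bytes are read
# through ctypes.from_buffer, i.e. in the machine's native byte order
# (little-endian on the platforms PyTorch CI uses), so [1, 0, 0, 0] maps to the
# int32 value 1.
def _example_bytes_to_scalar():
    t = bytes_to_scalar([1, 0, 0, 0], torch.int32, torch.device('cpu'))
    assert t.item() == 1
    return t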
def sandcastle_skip_if(condition, reason):
"""
    Similar to unittest.skipIf; however, in the sandcastle environment it just
    "passes" the test instead, to avoid creating tasks that complain about
    tests skipping continuously.
"""
def decorator(func):
if condition:
if IS_SANDCASTLE:
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return wrapper
else:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
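# Editor's illustrative sketch (not from the original source): the usual
# consumer of this value is torch.cuda._sleep, which spins for a given number
# of cycles; scaling by get_cycles_per_ms() turns that into an approximate
# wall-clock delay.
def _example_cuda_sleep_for_ms(milliseconds: float) -> None:
    if torch.cuda.is_available():
        torch.cuda._sleep(int(milliseconds * get_cycles_per_ms()))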
# OpInfo utils
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration:
raise unittest.SkipTest('Skipped! Need at least 1 sample input')
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
@contextmanager
def custom_op(opname, symbolic_fn, opset_version):
"""Context manager/decorator to test ONNX export with custom oeprator"""
try:
register_custom_op_symbolic(opname, symbolic_fn, opset_version)
yield
finally:
unregister_custom_op_symbolic(opname, opset_version)
|
data_set_helpers.py
|
import pandas
import tensorflow as tf
from threading import Thread
from math import ceil
from six.moves import range
from util.audio import audiofile_to_input_vector
from util.gpu import get_available_gpus
from util.text import ctc_label_dense_to_sparse, text_to_char_array
class DataSets(object):
def __init__(self, train, dev, test):
'''Container for train, dev and test sets of one corpus.
Args:
train (DataSet): the train data set of the corpus
dev (DataSet): the validation data set of the corpus
test (DataSet): the test data set of the corpus
'''
self._dev = dev
self._test = test
self._train = train
    def start_queue_threads(self, session, coord):
        # DataSet.start_queue_threads requires a coordinator, so pass it through here.
        self._dev.start_queue_threads(session, coord)
        self._test.start_queue_threads(session, coord)
        self._train.start_queue_threads(session, coord)
@property
def train(self):
return self._train
@property
def dev(self):
return self._dev
@property
def test(self):
return self._test
class DataSet(object):
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
self._coord = None
self._numcep = numcep
self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
self._x_length = tf.placeholder(tf.int32, [])
self._y = tf.placeholder(tf.int32, [None,])
self._y_length = tf.placeholder(tf.int32, [])
self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
capacity=2 * self._get_device_count() * batch_size)
self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
self.batch_size = batch_size
self._numcontext = numcontext
self._thread_count = thread_count
self._files_list = self._create_files_list(files_list)
self._next_index = next_index
def _get_device_count(self):
available_gpus = get_available_gpus()
return max(len(available_gpus), 1)
def start_queue_threads(self, session, coord):
self._coord = coord
batch_threads = [Thread(target=self._populate_batch_queue, args=(session,)) for i in range(self._thread_count)]
for batch_thread in batch_threads:
self._coord.register_thread(batch_thread)
batch_thread.daemon = True
batch_thread.start()
return batch_threads
def close_queue(self, session):
session.run(self._close_op)
def _create_files_list(self, files_list):
# 1. Sort by wav filesize
# 2. Select just wav filename and transcript columns
# 3. Return a NumPy representation
        # .loc replaces the long-removed DataFrame.ix indexer.
        return files_list.sort_values(by="wav_filesize") \
                         .loc[:, ["wav_filename", "transcript"]] \
                         .values
def _indices(self):
index = -1
while not self._coord.should_stop():
index = self._next_index(index) % len(self._files_list)
yield self._files_list[index]
def _populate_batch_queue(self, session):
for wav_file, transcript in self._indices():
source = audiofile_to_input_vector(wav_file, self._numcep, self._numcontext)
source_len = len(source)
target = text_to_char_array(transcript)
target_len = len(target)
try:
session.run(self._enqueue_op, feed_dict={
self._x: source,
self._x_length: source_len,
self._y: target,
self._y_length: target_len})
except tf.errors.CancelledError:
return
def next_batch(self):
source, source_lengths, target, target_lengths = self.example_queue.dequeue_many(self.batch_size)
sparse_labels = ctc_label_dense_to_sparse(target, target_lengths, self.batch_size)
return source, source_lengths, sparse_labels
@property
def total_batches(self):
# Note: If len(_files_list) % batch_size != 0, this re-uses initial files
return int(ceil(len(self._files_list) / self.batch_size))
class SwitchableDataSet(object):
def __init__(self, data_sets):
'''Data set that is wrapping a data sets instance to switch between train, dev and test instances during training.
Args:
data_sets (DataSets): the corpus container holding all three data sets
'''
self._data_sets = data_sets
self._sets = [data_sets.train, data_sets.dev, data_sets.test]
self._queues = [s.example_queue for s in self._sets]
self._queue_selector = tf.placeholder(tf.int32, name='Queue_Selector')
self._queue = tf.QueueBase.from_list(self._queue_selector, self._queues)
self._close_op = self._queue.close(cancel_pending_enqueues=True)
self._data_set = data_sets.train
def set_data_set(self, feed_dict, data_set):
index = self._sets.index(data_set)
assert index >= 0
feed_dict[self._queue_selector] = index
self._data_set = data_set
def start_queue_threads(self, session, coord):
batch_threads = []
for s in self._sets:
batch_threads += s.start_queue_threads(session, coord)
return batch_threads
def close_queue(self, session):
session.run(self._close_op, feed_dict={ self._queue_selector: 0 })
for s in self._sets:
s.close_queue(session)
def next_batch(self):
source, source_lengths, target, target_lengths = self._queue.dequeue_many(self._data_set.batch_size)
sparse_labels = ctc_label_dense_to_sparse(target, target_lengths, self._data_set.batch_size)
return source, source_lengths, sparse_labels
def read_data_sets(train_csvs, dev_csvs, test_csvs,
train_batch_size, dev_batch_size, test_batch_size,
numcep, numcontext, thread_count=8,
stride=1, offset=0, next_index=lambda s, i: i + 1,
limit_dev=0, limit_test=0, limit_train=0):
# Read the processed set files from disk
def read_csvs(csvs):
files = None
for csv in csvs:
file = pandas.read_csv(csv)
if files is None:
files = file
else:
files = files.append(file)
return files
train_files = read_csvs(train_csvs)
dev_files = read_csvs(dev_csvs)
test_files = read_csvs(test_csvs)
# Create train DataSet from all the train archives
train = _read_data_set(train_files, thread_count, train_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('train', i), limit=limit_train)
# Create dev DataSet from all the dev archives
dev = _read_data_set(dev_files, thread_count, dev_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('dev', i), limit=limit_dev)
# Create test DataSet from all the test archives
test = _read_data_set(test_files, thread_count, test_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('test', i), limit=limit_test)
# Return DataSets
return DataSets(train, dev, test)
def _read_data_set(filelist, thread_count, batch_size, numcep, numcontext, stride=1, offset=0, next_index=lambda i: i + 1, limit=0):
# Optionally apply dataset size limits
if limit > 0:
filelist = filelist.iloc[:limit]
filelist = filelist[offset::stride]
# Return DataSet
return DataSet(filelist, thread_count, batch_size, numcep, numcontext, next_index=next_index)
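# Hedged usage sketch (not part of the original module): wires read_data_sets
# and SwitchableDataSet together. The CSV paths are placeholders and are
# assumed to follow the importer format with 'wav_filename', 'wav_filesize'
# and 'transcript' columns; batch size, numcep and numcontext values are
# illustrative only.
if __name__ == '__main__':
    data_sets = read_data_sets(['train.csv'], ['dev.csv'], ['test.csv'],
                               train_batch_size=16, dev_batch_size=16,
                               test_batch_size=16, numcep=26, numcontext=9)
    switchable = SwitchableDataSet(data_sets)
    # next_batch() only builds graph tensors; evaluating them dequeues a batch
    # from whichever queue set_data_set() selected through the feed dict.
    source, source_lengths, sparse_labels = switchable.next_batch()
    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = switchable.start_queue_threads(session, coord)
        feed_dict = {}
        switchable.set_data_set(feed_dict, data_sets.train)
        coord.request_stop()
        switchable.close_queue(session)
        coord.join(threads)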
|
test_web_backtest.py
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
@author: yanqiong
@file: test_web_backtest.py
@create_on: 2020/2/12
@description: "Users/yanqiong/Documents/geckodriver-v0.26.0-macos.tar.gz"
"""
import os
import sys
import time
import unittest
import multiprocessing as mp
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import date
from tqsdk import TqApi, TqBacktest, TargetPosTask
from tqsdk.exceptions import BacktestFinished
# Code executed in the child process
from tqsdk.test.api.helper import MockInsServer
def run_tianqin_code(port, queue):
try:
ins_url= "http://127.0.0.1:5000/t/md/symbols/2019-07-03.json"
api = TqApi(backtest=TqBacktest(start_dt=date(2019, 7, 10), end_dt=date(2019, 7, 20)), _ins_url=ins_url, web_gui="127.0.0.1:" + port)
queue.put("webready")
klines = api.get_kline_serial("DCE.m1912", 5 * 60, data_length=15)
target_pos = TargetPosTask(api, "DCE.m1912")
while True:
api.wait_update()
if api.is_changing(klines):
ma = sum(klines.close.iloc[-15:]) / 15
if klines.close.iloc[-1] > ma:
target_pos.set_target_volume(5)
elif klines.close.iloc[-1] < ma:
target_pos.set_target_volume(0)
except BacktestFinished as e:
while True:
api.wait_update()
except Exception as e:
api.close()
class WebTestOnChrome(unittest.TestCase):
def setUp(self) -> None:
self.ins = MockInsServer(5000)
self.chrome_options = ChromeOptions()
self.chrome_options.headless = True
ctx = mp.get_context('spawn')
self.port = "8082"
self.q = ctx.Queue()
self.tq_process = ctx.Process(target=run_tianqin_code, args=(self.port, self.q))
self.tq_process.start()
self.q.get()
def tearDown(self):
self.ins.close()
self.tq_process.terminate()
@unittest.skipIf(not sys.platform.startswith("win"), "test on win")
def test_on_win(self):
chromedriver_path = os.path.join(os.getenv("ChromeWebDriver"), "chromedriver.exe")
run_for_driver(webdriver.Chrome(executable_path=chromedriver_path, options=self.chrome_options), self)
@unittest.skipIf(not sys.platform.startswith("linux"), "test on linux")
def test_on_linux(self):
exe_path = os.path.join(os.getenv("CHROMEWEBDRIVER"), "chromedriver")
driver = webdriver.Chrome(executable_path=exe_path, options=self.chrome_options)
run_for_driver(driver, self)
@unittest.skipIf(not sys.platform.startswith("darwin"), "test on macos")
def test_on_macos(self):
run_for_driver(webdriver.Chrome(options=self.chrome_options), self)
class WebTestOnFirefox(unittest.TestCase):
def setUp(self) -> None:
self.ins = MockInsServer(5000)
self.firefox_options = FirefoxOptions()
self.firefox_options.headless = True
ctx = mp.get_context('spawn')
self.port = "8081"
self.q = ctx.Queue()
self.tq_process = ctx.Process(target=run_tianqin_code, args=(self.port, self.q))
self.tq_process.start()
self.q.get()
def tearDown(self):
self.ins.close()
self.tq_process.terminate()
@unittest.skipIf(not sys.platform.startswith("win"), "test on win")
def test_on_win(self):
geckodriver_path = os.path.join(os.getenv("GeckoWebDriver"), "geckodriver.exe")
run_for_driver(webdriver.Firefox(executable_path=geckodriver_path, options=self.firefox_options), self)
@unittest.skipIf(not sys.platform.startswith("linux"), "test on linux")
def test_on_linux(self):
exe_path = os.path.join(os.getenv("GECKOWEBDRIVER"), "geckodriver")
driver = webdriver.Firefox(executable_path=exe_path, options=self.firefox_options)
run_for_driver(driver, self)
@unittest.skipIf(not sys.platform.startswith("darwin"), "test on macos")
def test_on_macos(self):
run_for_driver(webdriver.Firefox(options=self.firefox_options), self)
def run_for_driver(driver, test):
driver.implicitly_wait(30)
driver.get("http://127.0.0.1:" + test.port)
wait = WebDriverWait(driver, 30)
    wait.until(EC.title_is("tqsdk-python-web")) # the K-line (candlestick) chart page has loaded
logo = driver.find_element_by_tag_name("img")
test.assertEqual("Tianqin", logo.get_attribute("alt"))
    # check whether the K-line chart shows trade arrows
chart_main_marks = driver.find_element_by_css_selector("svg.tqchart>g.root g.main.marks")
trade_arrow_paths = chart_main_marks.find_element_by_css_selector("g.tradearrow")
wait = WebDriverWait(driver, 30)
wait.until(element_has_child(trade_arrow_paths, "path"))
    # check whether the trades table is displayed
trades_table = driver.find_element_by_css_selector("div.reports.trades-table>table")
wait = WebDriverWait(driver, 30)
wait.until(element_has_child(trades_table, "tbody>tr"))
driver.close()
class element_has_child(object):
def __init__(self, element, css_selector):
self.element = element
self.css_selector = css_selector
def __call__(self, driver):
children = self.element.find_element_by_css_selector(self.css_selector)
if not children:
return False
return True
if __name__ == "__main__":
unittest.main()
|
NonSSL.py
|
#!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import requests
import os
from threading import Thread
import sys
from multiprocessing import current_process
import sessionvalidation.sessionvalidation as sv
import lib.result as result
import extractHeader
import mainProcess
import json
import gzip
bSTOP = False
def createDummyBodywithLength(numberOfbytes):
    if numberOfbytes <= 0:
        return None
    # one 'a' followed by (numberOfbytes - 1) 'b' characters, exactly what the
    # original character-by-character loop produced
    return 'a' + 'b' * (numberOfbytes - 1)
def handleResponse(response, *args, **kwargs):
print(response.status_code)
# resp=args[0]
#expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2)
#expected_output = (int(expected_output_split[1]), str( expected_output_split[2]))
#r = result.Result(session_filename, expected_output[0], response.status_code)
# print(r.getResultString(colorize=True))
# make sure len of the message body is greater than length
def gen():
yield 'pforpersia,champaignurbana'.encode('utf-8')
yield 'there'.encode('utf-8')
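# Hedged illustration (not part of the original tool and never called): passing
# a generator such as gen() above as ``data`` makes the requests library send
# the body with chunked Transfer-Encoding, which is the case the header
# handling in txn_replay() below has to work around. The URL is a placeholder.
def _example_chunked_post(url='http://example.com/upload'):
    return requests.post(url, data=gen())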
def txn_replay(session_filename, txn, proxy, result_queue, request_session):
""" Replays a single transaction
:param request_session: has to be a valid requests session"""
req = txn.getRequest()
resp = txn.getResponse()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier
if 'body' in txn_req_headers_dict:
del txn_req_headers_dict['body']
#print("Replaying session")
try:
# response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers),
# 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
# headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me
method = extractHeader.extract_txn_req_method(txn_req_headers)
response = None
body = None
content = None
if 'Transfer-Encoding' in txn_req_headers_dict:
# deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header
# This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this
# BUT, this is not a problem if the data is not chunked encoded.. Strange, huh?
del txn_req_headers_dict['Host']
if 'Content-Length' in txn_req_headers_dict:
#print("ewww !")
del txn_req_headers_dict['Content-Length']
body = gen()
if 'Content-Length' in txn_req_headers_dict:
nBytes = int(txn_req_headers_dict['Content-Length'])
body = createDummyBodywithLength(nBytes)
#print("request session is",id(request_session))
if method == 'GET':
response = request_session.get('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
headers=txn_req_headers_dict, stream=False, allow_redirects=False, data=body)
if 'Content-Length' in response.headers:
content = response.raw
#print("len: {0} received {1}".format(response.headers['Content-Length'],content))
elif method == 'POST':
response = request_session.post('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
headers=txn_req_headers_dict, stream=False, data=body, allow_redirects=False)
if 'Content-Length' in response.headers:
content = response.raw
# print("reading==========>>>>>>>>>>>>>.")
# print(content.data)
#print("len: {0} received {1}".format(response.headers['Content-Length'],content))
elif method == 'HEAD':
response = request_session.head('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
headers=txn_req_headers_dict, stream=False)
#gzip_file = gzip.GzipFile(fileobj=content)
#shutil.copyfileobj(gzip_file, f)
expected = extractHeader.responseHeader_to_dict(resp.getHeaders())
# print(expected)
if mainProcess.verbose:
expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2)
expected_output = (int(expected_output_split[1]), str(expected_output_split[2]))
r = result.Result(session_filename, expected_output[0], response.status_code)
print(r.getResultString(response.headers, expected, colorize=True))
r.Compare(response.headers, expected)
# result_queue.put(r)
except UnicodeEncodeError as e:
# these unicode errors are due to the interaction between Requests and our wiretrace data.
# TODO fix
print("UnicodeEncodeError exception")
except requests.exceptions.ContentDecodingError as e:
print("ContentDecodingError", e)
except:
e = sys.exc_info()
print("ERROR in requests: ", e, response, session_filename)
def session_replay(input, proxy, result_queue):
    ''' Replay all transactions in a session.
    The entire session is replayed on one requests.Session (so one socket / TCP connection). '''
    global bSTOP
# if timing_control:
# time.sleep(float(session._timestamp)) # allow other threads to run
    while not bSTOP:
for session in iter(input.get, 'STOP'):
# print(bSTOP)
if session == 'STOP':
print("Queue is empty")
bSTOP = True
break
with requests.Session() as request_session:
request_session.proxies = proxy
for txn in session.getTransactionIter():
try:
txn_replay(session._filename, txn, proxy, result_queue, request_session)
except:
e = sys.exc_info()
print("ERROR in replaying: ", e, txn.getRequest().getHeaders())
bSTOP = True
#print("Queue is empty")
input.put('STOP')
break
def client_replay(input, proxy, result_queue, nThread):
Threads = []
for i in range(nThread):
t = Thread(target=session_replay, args=[input, proxy, result_queue])
t.start()
Threads.append(t)
for t1 in Threads:
t1.join()
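# Hedged usage sketch (not part of the original tool): shows only the queue and
# 'STOP'-sentinel wiring that session_replay() expects. ``sessions`` would be
# sessionvalidation session objects and ``proxy`` a requests-style proxy dict;
# both are assumptions here, as is the function name itself.
def _example_driver(sessions, proxy, nThread=4):
    import queue
    work = queue.Queue()
    results = queue.Queue()
    for s in sessions:
        work.put(s)
    work.put('STOP')  # sentinel that makes the worker threads exit
    client_replay(work, proxy, results, nThread)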
|
new_mega_account.py
|
# fork of https://github.com/IceWreck/MegaScripts
# Create New Mega Accounts
# saves credentials to a file called accounts.txt
import requests
import subprocess
import time
import re
import random
import string
import threading
EMAIL_LENGTH = 16
MINIMUM_PASSWORD_LENGTH = 32
ACCOUNT_TO_GENERATE = int(input("How many accounts should be generated: "))
def find_url(string):
regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
url = re.findall(regex, string)
return [x[0] for x in url]
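# Illustrative example (assumed input, not from the original script):
#   find_url("please open https://mega.nz/confirm to verify")
# should return ['https://mega.nz/confirm'].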
class MegaAccount:
def __init__(self, name, password):
self.name = name
self.password = password
def register(self):
mail_req = requests.get(
"https://api.guerrillamail.com/ajax.php?f=get_email_address&lang=en"
).json()
self.email = mail_req["email_addr"]
self.email_token = mail_req["sid_token"]
        # begin registration
registration = subprocess.run(
[
"megareg",
"--register",
"--email",
self.email,
"--name",
self.name,
"--password",
self.password,
],
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = registration.stdout
rows = output.split("\n")
self.verify_command = None
for row in rows:
if "megareg --verify" in row:
self.verify_command = row
def no_verify_command(self):
return self.verify_command is None
def verify(self):
# check if there is mail
mail_id = None
for i in range(5):
if mail_id is not None:
break
time.sleep(10)
check_mail = requests.get(
f"https://api.guerrillamail.com/ajax.php?f=get_email_list&offset=0&sid_token={self.email_token}"
).json()
for email in check_mail["list"]:
if "MEGA" in email["mail_subject"]:
mail_id = email["mail_id"]
break
# get verification link
if mail_id is None:
return
view_mail = requests.get(
f"https://api.guerrillamail.com/ajax.php?f=fetch_email&email_id={mail_id}&sid_token={self.email_token}"
)
mail_body = view_mail.json()["mail_body"]
links = find_url(mail_body)
self.verify_command = str(self.verify_command).replace("@LINK@", links[2])
# perform verification
verification = subprocess.run(
self.verify_command,
shell=True,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
if "registered successfully!" in str(verification.stdout):
print("Success. Account Details are: " + f"{self.email} - {self.password}")
# save to file
with open("accounts.txt", "a") as f:
f.write(f"{self.email}:{self.password}" + "\n")
else:
print("Failed.")
def new_account():
name = "".join(random.choice(string.ascii_lowercase) for x in range(EMAIL_LENGTH))
    password = "".join(random.choice(string.ascii_lowercase + string.ascii_uppercase) for x in range(MINIMUM_PASSWORD_LENGTH))
acc = MegaAccount(name, password)
acc.register()
if acc.no_verify_command():
print("Cannot retrieve verify command, sorry")
else:
print("Registered. Waiting for verification email...")
acc.verify()
if __name__ == "__main__":
# how many accounts to create at once (keep the number under 10)
for count in range(ACCOUNT_TO_GENERATE):
t = threading.Thread(target=new_account)
t.start()
|
scanner.py
|
#!/usr/bin/python
import socket
import struct
import os
import threading
import time
from ctypes import *
from optparse import OptionParser
from netaddr import IPNetwork, IPAddress
HELP = """\
Sniffer reads raw packets, decodes the IP layer and prints the result.
"""
magic_message ="Ad Astra!"
g_verbose = True
class IP(Structure):
_fields_= [
("ihl", c_ubyte,4),
("version", c_ubyte,4),
("tos", c_ubyte),
("len", c_ushort),
("id", c_ushort),
("offset", c_ushort),
("ttl", c_ubyte),
("protocol_num", c_ubyte),
("sum", c_ushort),
("src", c_ulong),
("dst", c_ulong)
]
def __new__(self, socket_buffer=None):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer = None):
        # map protocol constants to their names
self.protocol_map = {1:"ICMP", 6:"TCP", 17: "UDP"}
# human readable IP addresses
self.src_address = socket.inet_ntoa(struct.pack("<L", self.src))
self.dest_address = socket.inet_ntoa(struct.pack("<L", self.dst))
        # human readable protocol
try:
self.protocol = self.protocol_map[self.protocol_num]
except:
self.protocol = str(self.protocol_num)
class ICMP(Structure):
_fields_ = [
("type", c_ubyte),
("code", c_ubyte),
("checksum",c_ushort),
("unused", c_ushort),
("next_hop_mtu,", c_ushort)
]
def __new__(self, socket_buffer):
return self.from_buffer_copy(socket_buffer)
def __init__ (self, socket_buffer):
pass
def udp_sender(subnet, magic_message):
time.sleep(5)
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for ip in IPNetwork(subnet):
try:
sender.sendto(magic_message, ("%s" % ip, 65212))
except:
pass
def parse_options():
global g_verbose
parser = OptionParser(usage='usage: %prog [options] <remote-ip>',
version='%prog 1.0', description=HELP)
parser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True,
help='squelch all informational output')
parser.add_option('-s', '--subnet', action='store', type='string', dest='subnet',
default=None,
help='subnet to scan')
parser.add_option('-p', '--remote-port', action='store', type='int', dest='port',
default=0,
help='port on server to listen on')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments.')
if options.subnet is None:
parser.error('Subnet required (-s).')
g_verbose = options.verbose
return options, args[0]
def verbose(s):
if g_verbose:
print(s)
def main():
options, remote = parse_options()
if os.name == "nt":
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
verbose ('Bind to %s:%d' % (remote, options.port))
sniffer.bind((remote, options.port))
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
    verbose ('Sniffing packets from %s:%d' % (remote, options.port))
    # start sending probe packets
    verbose ("Start sending packets...")
t = threading.Thread(target = udp_sender, args=(options.subnet,magic_message))
t.start()
    # read a single packet at a time and decode it
try:
while True:
raw_buffer = sniffer.recvfrom(65565)[0]
ip_header = IP(raw_buffer[0:20])
verbose ("Protocol: %s %s -> %s" % (ip_header.protocol, ip_header.src_address, ip_header.dest_address))
if ip_header.protocol == "ICMP":
offset = ip_header.ihl * 4
buf = raw_buffer[offset:offset+sizeof(ICMP)]
icmp_header = ICMP(buf)
verbose ("ICMP -> Type: %d Code: %d" % (icmp_header.type, icmp_header.code))
if (icmp_header.type == 3 and icmp_header.code == 3):
# check IPHeader
if IPAddress(ip_header.src_address) in IPNetwork(options.subnet):
# check magic_message
if raw_buffer[len(raw_buffer)-len(magic_message):] == magic_message:
print "Host Up: %s" % ip_header.src_address
except KeyboardInterrupt:
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
if __name__ == '__main__':
main()
|
compaction_handler.py
|
# !/usr/bin/python
# Copyright 2011 Lockheed Martin
'''
View Compaction class.
Created on August 31, 2011
@author: jklo
'''
import logging
import json
import base64
import urllib2
import multiprocessing
from lr.lib.couch_change_monitor.base_change_threshold_handler import BaseChangeThresholdHandler
log = logging.getLogger(__name__)
class CompactionHandler(BaseChangeThresholdHandler):
"""Class to compact the views of the monitored database based on a change count
or time threshold. Derived class is expected to implements the _canHandle
predicate function to filter what changes should be counted.
"""
def _canHandle(self, change, database):
"""Since we don't care what what kind of doc this is, just return true."""
return True
def _compactView(self, compactUrl, credentials):
log.debug('start view compaction %s' % compactUrl)
#curl -H "Content-Type: application/json" -X POST http://localhost:5984/dbname/_compact/designname
req = urllib2.Request(compactUrl, data="", headers={"Content-Type": "application/json"})
if credentials is not None and len(credentials) > 0:
base64string = base64.encodestring('%s:%s' % credentials).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
log.debug(urllib2.urlopen(req).read())
def _handle(self, change, database):
log.debug("class: {0} Updating views ...".format(self.__class__.__name__))
try:
designDocs = database.view('_all_docs', include_docs=True,
startkey='_design%2F', endkey='_design0')
for designDoc in designDocs:
viewInfo = "{0}/{1}/_info".format(database.resource.url, designDoc.id)
viewInfo = json.load(urllib2.urlopen(viewInfo))
if (not viewInfo['view_index']['updater_running'] and
not viewInfo['view_index']['compact_running'] and
'views' in designDoc.doc and len(designDoc.doc['views']) > 0):
designName = designDoc.id.split('/')[1]
# http://localhost:5984/dbname/_compact/designname
compactName = "_compact/{0}".format(designName)
compactUrl = '/'.join([database.resource.url, compactName])
auth = database.resource.credentials
multiprocessing.Process(target=self._compactView, args=(compactUrl, auth)).start()
except Exception as e:
log.error(e)
|
qt.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import os
import sys
import threading
from threading import Thread
import re
from decimal import Decimal
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from electrum.gui.qt.util import *
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.main_window import StatusBarButton
from electrum.i18n import _
from electrum.plugin import hook
from electrum.util import PrintError, is_valid_email
from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, PrintError):
def __init__(self, plugin, window):
super().__init__()
self.plugin = plugin
self.window = window
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.print_error("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
try:
wallet.on_otp(tx, auth_code)
except:
on_failure(sys.exc_info())
return
on_success(tx)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.start_request_thread(wallet)
window.show_error(_('Requesting account info from TrustedCoin server...') + '\n' +
_('Please try again.'))
return True
return False
def settings_dialog(self, window):
self.waiting_dialog(window, partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay(self.config)
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def on_buy(self, window, k, v, d):
d.close()
if window.pluginsdialog:
window.pluginsdialog.close()
wallet = window.wallet
uri = "monetaryunit:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
wallet.is_billing = True
window.pay_to_URI(uri)
window.payto_e.setFrozen(True)
window.message_e.setFrozen(True)
window.amount_e.setFrozen(True)
def go_online_dialog(self, wizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.stack = []
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
import traceback
traceback.print_exc(file=sys.stderr)
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + str(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = Thread(target=request_TOS)
t.setDaemon(True)
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ipaddress
import random
import re
import socket
import time
import weakref
from datetime import timedelta
from threading import Event, Thread
from typing import Any, Callable, Dict, Optional, Tuple, Union
def _parse_rendezvous_config(config_str: str) -> Dict[str, str]:
"""Extracts key-value pairs from a rendezvous configuration string.
Args:
config_str:
A string in format <key1>=<value1>,...,<keyN>=<valueN>.
"""
config: Dict[str, str] = {}
config_str = config_str.strip()
if not config_str:
return config
key_values = config_str.split(",")
for kv in key_values:
key, *values = kv.split("=", 1)
key = key.strip()
if not key:
raise ValueError(
"The rendezvous configuration string must be in format "
"<key1>=<value1>,...,<keyN>=<valueN>."
)
value: Optional[str]
if values:
value = values[0].strip()
else:
value = None
if not value:
raise ValueError(
f"The rendezvous configuration option '{key}' must have a value specified."
)
config[key] = value
return config
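# Illustrative examples (not part of the original module):
#   _parse_rendezvous_config("join_timeout=600,last_call_timeout=30")
#     -> {"join_timeout": "600", "last_call_timeout": "30"}
#   _parse_rendezvous_config("timeout=") raises ValueError (missing value).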
def _try_parse_port(port_str: str) -> Optional[int]:
"""Tries to extract the port number from ``port_str``."""
if port_str and re.match(r"^[0-9]{1,5}$", port_str):
return int(port_str)
return None
def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> Tuple[str, int]:
"""Extracts the hostname and the port number from a rendezvous endpoint.
Args:
endpoint:
A string in format <hostname>[:<port>].
default_port:
The port number to use if the endpoint does not include one.
Returns:
A tuple of hostname and port number.
"""
if endpoint is not None:
endpoint = endpoint.strip()
if not endpoint:
return ("localhost", default_port)
# An endpoint that starts and ends with brackets represents an IPv6 address.
if endpoint[0] == "[" and endpoint[-1] == "]":
host, *rest = endpoint, *[]
else:
host, *rest = endpoint.rsplit(":", 1)
# Sanitize the IPv6 address.
if len(host) > 1 and host[0] == "[" and host[-1] == "]":
host = host[1:-1]
if len(rest) == 1:
port = _try_parse_port(rest[0])
if port is None or port >= 2 ** 16:
raise ValueError(
f"The port number of the rendezvous endpoint '{endpoint}' must be an integer "
"between 0 and 65536."
)
else:
port = default_port
if not re.match(r"^[\w\.:-]+$", host):
raise ValueError(
f"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of "
"labels, an IPv4 address, or an IPv6 address."
)
return host, port
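# Illustrative examples (not part of the original module):
#   parse_rendezvous_endpoint("node1:29400", default_port=29500) -> ("node1", 29400)
#   parse_rendezvous_endpoint("[::1]", default_port=29500)       -> ("::1", 29500)
#   parse_rendezvous_endpoint(None, default_port=29500)          -> ("localhost", 29500)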
def _matches_machine_hostname(host: str) -> bool:
"""Indicates whether ``host`` matches the hostname of this machine.
This function compares ``host`` to the hostname as well as to the IP
addresses of this machine. Note that it may return a false negative if this
machine has CNAME records beyond its FQDN or IP addresses assigned to
secondary NICs.
"""
if host == "localhost":
return True
try:
addr = ipaddress.ip_address(host)
except ValueError:
addr = None
if addr and addr.is_loopback:
return True
this_host = socket.gethostname()
if host == this_host:
return True
addr_list = socket.getaddrinfo(
this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME
)
for addr_info in addr_list:
# If we have an FQDN in the addr_info, compare it to `host`.
if addr_info[3] and addr_info[3] == host:
return True
# Otherwise if `host` represents an IP address, compare it to our IP
# address.
if addr and addr_info[4][0] == str(addr):
return True
return False
def _delay(seconds: Union[float, Tuple[float, float]]) -> None:
"""Suspends the current thread for ``seconds``.
Args:
seconds:
Either the delay, in seconds, or a tuple of a lower and an upper
bound within which a random delay will be picked.
"""
if isinstance(seconds, tuple):
seconds = random.uniform(*seconds)
# Ignore delay requests that are less than 10 milliseconds.
if seconds >= 0.01:
time.sleep(seconds)
class _PeriodicTimer:
"""Represents a timer that periodically runs a specified function.
Args:
interval:
The interval, in seconds, between each run.
function:
The function to run.
"""
    # The state of the timer is held in a separate context object to avoid a
    # reference cycle between the timer and the background thread.
class _Context:
interval: float
function: Callable[..., None]
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
stop_event: Event
_name: Optional[str]
_thread: Optional[Thread]
_finalizer: Optional[weakref.finalize]
# The context that is shared between the timer and the background thread.
_ctx: _Context
def __init__(
self,
interval: timedelta,
function: Callable[..., None],
*args: Any,
**kwargs: Any,
) -> None:
self._name = None
self._ctx = self._Context()
self._ctx.interval = interval.total_seconds()
self._ctx.function = function # type: ignore[assignment]
self._ctx.args = args or ()
self._ctx.kwargs = kwargs or {}
self._ctx.stop_event = Event()
self._thread = None
self._finalizer = None
@property
def name(self) -> Optional[str]:
"""Gets the name of the timer."""
return self._name
def set_name(self, name: str) -> None:
"""Sets the name of the timer.
The specified name will be assigned to the background thread and serves
for debugging and troubleshooting purposes.
"""
if self._thread:
raise RuntimeError("The timer has already started.")
self._name = name
def start(self) -> None:
"""Start the timer."""
if self._thread:
raise RuntimeError("The timer has already started.")
self._thread = Thread(
target=self._run, name=self._name or "PeriodicTimer", args=(self._ctx,), daemon=True
)
# We avoid using a regular finalizer (a.k.a. __del__) for stopping the
# timer as joining a daemon thread during the interpreter shutdown can
# cause deadlocks. The weakref.finalize is a superior alternative that
# provides a consistent behavior regardless of the GC implementation.
self._finalizer = weakref.finalize(
self, self._stop_thread, self._thread, self._ctx.stop_event
)
# We do not attempt to stop our background thread during the interpreter
# shutdown. At that point we do not even know whether it still exists.
self._finalizer.atexit = False
self._thread.start()
def cancel(self) -> None:
"""Stop the timer at the next opportunity."""
if self._finalizer:
self._finalizer()
@staticmethod
def _run(ctx) -> None:
while not ctx.stop_event.wait(ctx.interval):
ctx.function(*ctx.args, **ctx.kwargs)
@staticmethod
def _stop_thread(thread, stop_event):
stop_event.set()
thread.join()
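# Hedged usage sketch (comment only, not executed): run a hypothetical
# send_keepalive(client) callback every 30 seconds until the timer is
# cancelled or garbage-collected; both names are placeholders.
#
#   timer = _PeriodicTimer(timedelta(seconds=30), send_keepalive, client)
#   timer.set_name("keepalive-timer")
#   timer.start()
#   ...
#   timer.cancel()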
|
workload_D.py
|
"""
This workload presents a simple interface that can be reused in other workloads.
In summary, it runs several subprocesses using the multiprocessing package,
makes the connections between them, and then starts working.
This particular workload spawns NETWORK_SIZE machines, NUM_PROPOSERS of which are
proposers. We can run with either gRPC or RDTP by changing start_vm or
proposer_entrypoint / replicas_entrypoint. Just make sure that the workload
does not start with replicas running gRPC and proposer running RDTP.
"""
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import time
from multiprocessing import Process
from paxos.vm import VM
from paxos.messengers import rpcMessenger
from paxos.receivers import rpcReceiver
from paxos.messengers import rdtpMessenger
from paxos.receivers import rdtpReceiver
from paxos import proposer, acceptor, learner
NETWORK_SIZE = 5
NUM_PROPOSERS = 3
HOST = "localhost"
START_PORT = 6666
def initialize_rdtp_vm(name, use_disk):
return VM(name, rdtpMessenger.rdtpMessenger, rdtpReceiver.rdtpReceiver, use_disk)
def initialize_grpc_vm(name, use_disk):
return VM(name, rpcMessenger.grpcMessenger, rpcReceiver.grpcReceiver, use_disk)
def start_vm(name, network, initialize_vm = initialize_rdtp_vm):
"""
Starts a virtual machine with a given initializer.
Already starts serving.
@param name: the name of the machine that will be started
@param network: a dictionary containing information on the machines in
the network
@param initialize_vm: an initializer for the VM; by default, RDTP
@return: The instance of the RDTP virtual machine
"""
# initialize the virtual machine with my name
vm = initialize_vm(name, use_disk=False)
# fetch the host/port information from the network for me
host, port = network[name]
# add other machines
for friend_name, (friend_host, friend_port) in network.iteritems():
# !# should we send it to ourselves?
if friend_name == name:
continue
vm.add_destination(friend_name, friend_host, friend_port)
# start serving
vm.serve(host, port)
return vm
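# Illustrative (comment only): to run the same workload over gRPC instead of
# RDTP, pass the gRPC initializer defined above, e.g.
#   vm = start_vm(name, network, initialize_vm=initialize_grpc_vm)
# As the module docstring notes, proposers and replicas must use the same
# transport.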
def proposer_entrypoint(name, network):
"""
Thread entrypoint for a proposer.
This must simply call start_rdtp_vm with our name and network.
"""
# start an rdtp VM with our name and start serving
vm = start_vm(name, network)
# sleep a little bit before trying to send proposals
# (cheating for bootstrap)
time.sleep(2)
# decree number and value; these will change
n = 0
v = 5000
while True:
# propose values
vm.propose_to_quorum(n, v)
# update values for next round
n += 1
v -= 1
# give some time before proposing again
time.sleep(1)
def replicas_entrypoint(name, network):
# start an rdtp VM with our name and start serving
vm = start_vm(name, network)
# simply sleep forever, the server will handle the
# necessary requests
try:
while True:
time.sleep(600)
except KeyboardInterrupt:
vm.stop_server()
def main():
"""
Main routine for this workload; spawn a single proposer and a variable
number of acceptors (NETWORK_SIZE - 1).
"""
    # guarantee that the proposers fit within the network, leaving at least one replica
    assert(NUM_PROPOSERS < NETWORK_SIZE)
# a network is a dictionary of names => (host, port)
# we first build a network; then we spawn proposers, and finally
# spawn replicas
network = {}
# initialize the network
for i in xrange(NETWORK_SIZE):
name = "M" + str(i)
network[name] = (HOST, START_PORT + i)
# initialize the proposer process
for i in xrange(NUM_PROPOSERS):
proposer = Process(target = proposer_entrypoint, args = ("M" + str(i), network))
proposer.start()
# initialize all the replicas
for name in network.keys():
        # skip the proposers, which were already started above (plain string
        # comparison breaks past M9, so compare the numeric suffix instead)
        if int(name[1:]) < NUM_PROPOSERS:
continue
replicas = Process(target = replicas_entrypoint, args = (name, network))
replicas.start()
if __name__ == "__main__":
main()
|
monitor.py
|
import os
import time
import threading
class MonitoredLogFile(object):
MAX_LINES_TO_READ = 10
def __init__(self, path, prefix):
self._path = str(path)
self._prefix = str(prefix)
self._offset = 0
def update(self):
# skip over when the log file has not been made yet
if not os.path.isfile(self._path):
return
with open(self._path, 'r') as log_file:
log_file.seek(self._offset)
for _ in range(self.MAX_LINES_TO_READ):
# attempt to read another line
last_offset = self._offset
try:
line = log_file.readline()
                except:
                    line = "UNREADABLE_LINE"
self._offset = log_file.tell()
# in the case when we could not read any more line data
if last_offset == self._offset:
break
# display the output
#print('{:6d} {}: {}'.format(self._offset, self._prefix, line.rstrip()))
print('{}: {}'.format(self._prefix, line.rstrip()))
class ConstellationMonitor(object):
def __init__(self, nodes):
self._nodes = nodes
self._log_files = []
self._running = False
self._thread = None
def start(self):
# kick off the thread
self._running = True
self._thread = threading.Thread(target=self._background_monitor)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._running = False
self._thread.join()
def _background_monitor(self):
# create the log file watchers
monitors = [MonitoredLogFile(n.log_path, n.p2p_address)
for n in self._nodes]
while self._running:
for monitor in monitors:
monitor.update()
time.sleep(0.35)
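# Hedged usage sketch (not part of the original module): ``nodes`` is any
# iterable of objects exposing ``log_path`` and ``p2p_address`` attributes,
# which is all _background_monitor() reads from them.
#
#   watcher = ConstellationMonitor(nodes)
#   watcher.start()
#   ...  # run the constellation
#   watcher.stop()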
|
barista.py
|
from utils import json_config
import msgpack
from threading import Thread
import Queue
import time
from cookbook_manager import CookbookManager
from cookbook import Cookbook
from process.process import Point
from process.process import Command
from process.process import Process
from printer_server import PrinterServer
from heater_server import HeaterServer
from refill_server import RefillServer
from nanomsg import (
SUB,
SUB_SUBSCRIBE,
PAIR,
DONTWAIT,
Socket
)
from utils import channel
import logging
import logging.config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
class Barista(object):
IDLE = 'Idle'
BREWING = 'Brewing'
def __init__(self):
self.heater_temperature = 0
self.heater_duty_cycle = 0
self.heater_set_point = 0
self.heater_update_time = 0
self.is_water_full = False
self.total_cmd = 0
self.state = self.IDLE
self.printer_progress = 0
self.printer_state = 0
self.printer_state_string = ''
self.now_cookbook_name = ''
self.now_step = ''
self.now_step_index = 0
self.now_process = ''
self.now_process_index = 0
self.brew_queue = Queue.Queue()
self.stop = False
logger.info('Start printer server ...')
printer_server = PrinterServer()
printer_server_thread = self.__start_worker(
target=printer_server.start)
logger.info('Start heater server ...')
heater_server = HeaterServer()
heater_server_thread = self.__start_worker(target=heater_server.start)
logger.info('Start refill server ...')
refill_server = RefillServer()
refill_server_thread = self.__start_worker(target=refill_server.start)
# Read config
self.config = json_config.parse_json('config.json')
cfg = self.config['PID']
self.pid_cycle_time = cfg['cycle_time']
self.pid_k = cfg['k']
self.pid_i = cfg['i']
self.pid_d = cfg['d']
# Create nanomsg socket to publish status and receive command
logger.info('Connect to printer server ...')
cfg = self.config['PrinterServer']
self.printer_cmd = channel.Channel(
cfg['Command_Socket_Address'], 'Pair', False)
self.printer_pub = channel.Channel(
cfg['Publish_Socket_Address'], 'Sub', False)
logger.info('Connect to heater server ...')
cfg = self.config['HeaterServer']
self.heater_cmd = channel.Channel(
cfg['Command_Socket_Address'], 'Pair', False)
self.heater_pub = channel.Channel(
cfg['Publish_Socket_Address'], 'Sub', False)
logger.info('Connect to refill server ...')
cfg = self.config['RefillServer']
self.refill_cmd = channel.Channel(
cfg['Command_Socket_Address'], 'Pair', False)
self.refill_pub = channel.Channel(
cfg['Publish_Socket_Address'], 'Sub', False)
logger.info('Start monitor workers ...')
self.temperature_worker = self.__start_worker(
self.__temperature_monitor)
self.water_level_worker = self.__start_worker(
self.__water_level_monitor)
self.printer_worker = self.__start_worker(self.__printer_monitor)
self.brew_worker = self.__start_worker(self.__brew_worker)
def __start_worker(self, target):
worker = Thread(target=target)
worker.daemon = True
worker.start()
return worker
def __temperature_monitor(self):
"""
Receive the heater server published information
{
"cycle_time": 5,
"duty_cycle": 70,
"set_point": 80,
"temperature": 26.53
}
"""
while True:
resp = self.heater_pub.recv()
self.heater_temperature = round(resp.get('temperature', 0), 3)
self.heater_set_point = round(resp.get('set_point', 0), 3)
self.heater_duty_cycle = round(resp.get('duty_cycle', 0), 3)
self.heater_update_time = time.time()
logger.debug('Receive new temperature {}'.format(resp))
def __water_level_monitor(self):
while True:
resp = self.refill_pub.recv()
self.is_water_full = resp.get('full', None)
logger.debug(
'Receive new water level {}'.format(self.is_water_full))
def __printer_monitor(self):
while True:
data = self.printer_pub.recv()
#logging.info("Receive message from printer: {}".format(data))
if 'progress' in data:
self.printer_progress = data['progress']
if 'state' in data:
self.printer_state = data['state']
self.printer_state_string = data['state_string']
def __change_state(self, state):
logger.info(
'Barista change state from {} to {}'.format(self.state, state))
self.state = state
def __brew_worker(self):
while True:
cookbook_name = self.brew_queue.get()
logger.info('Start to cook {}'.format(cookbook_name))
self.refill_cmd.send({'Refill': 'STOP'})
self.__change_state(self.BREWING)
self.now_cookbook_name = cookbook_name
self.__brew(cookbook_name)
# Clean status
self.now_cookbook_name = ''
self.now_step = ''
self.now_step_index = 0
self.now_process = ''
self.now_process_index = 0
self.stop = False
self.__change_state(self.IDLE)
self.refill_cmd.send({'Refill': 'START'})
def __brew(self, cookbook_name):
cmgr = CookbookManager()
cookbook = cmgr.get(cookbook_name)
        logger.debug('Barista looked up the cookbook')
self.reset_gcode_counter()
self.__init_printer()
for step_index, step in enumerate(cookbook.steps):
logger.debug('# Start step {}'.format(step.title))
self.now_step = step.title
self.now_step_index = step_index
for process_index, process in enumerate(step.processes):
logger.debug('## Start process {}'.format(process.title))
self.now_process = process.title
self.now_process_index = process_index
for block in process.blocks:
logger.debug('### Start block {}'.format(block.lang))
self.handle_block(block)
self.wait_printer_finish()
def __send_to_printer(self, cmd):
if 'C' in cmd:
self.total_cmd = self.total_cmd + 1
elif 'G' in cmd:
self.total_cmd = self.total_cmd + len(cmd['G'])
self.printer_cmd.send(cmd)
def reset_gcode_counter(self):
self.total_cmd = 0
self.printer_cmd.send({'RESET_COUNT': 0})
def __init_printer(self):
self.__send_to_printer({'C': 'G21'})
self.__send_to_printer({'C': 'G28'})
self.__send_to_printer({'C': 'G90'})
self.__send_to_printer({'C': 'M83'})
self.wait_printer_finish()
def __convert_to_gcode(self, point):
gcode = 'G1'
if point.x is not None:
gcode = gcode + ' X{}'.format(point.x)
if point.y is not None:
gcode = gcode + ' Y{}'.format(point.y)
if point.z is not None:
gcode = gcode + ' Z{}'.format(point.z)
if point.e1 is not None:
gcode = gcode + ' E{}'.format(point.e1)
if point.f is not None:
gcode = gcode + ' F{}'.format(point.f)
return gcode
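    # Illustrative (comment only; the Point signature is inferred from
    # printer_jog below):
    #   self.__convert_to_gcode(Point(10, 20, None, 1.5, None, 1200))
    # produces the move command 'G1 X10 Y20 E1.5 F1200'.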
def brew(self, name):
self.brew_queue.put(name)
def stop_brew(self):
self.stop = True
self.__send_to_printer({'STOP': True})
def handle_block(self, block):
points = block.points()
gcodes = []
for point in points:
if type(point) is Point:
gcode = self.__convert_to_gcode(point)
gcodes.append(gcode)
elif type(point) is Command:
cmd = point.command
value = point.value
if cmd == 'Home':
self.__send_to_printer({'C': 'G28'})
elif cmd == 'Refill':
self.refill_cmd.send({'Refill': 'START'})
self.wait_refill()
elif cmd == 'Heat':
# Wait temperature to target temperature
self.wait_temperature(value)
elif cmd == 'Wait':
self.wait_printer_finish()
logger.debug('Sleep {} seconds'.format(value))
time.sleep(value)
if gcodes:
self.__send_to_printer({'G': gcodes})
def printer_jog(self, x=None, y=None, z=None, e1=None, e2=None, f=None):
point = Point(x, y, z, e1, e2, f)
self.__send_to_printer({'C': self.__convert_to_gcode(point)})
return
def set_temperature(self, value):
payload = {
'cycle_time': self.pid_cycle_time,
'k': self.pid_k,
'i': self.pid_i,
'd': self.pid_d,
'set_point': value
}
self.heater_cmd.send(payload)
def wait_temperature(self, value):
self.set_temperature(value)
        # wait for the temperature to reach the target set point
while not ((value - 0.5) < self.heater_temperature < (value + 0.5)):
logger.debug('Waiting temperature to {}'.format(value))
time.sleep(2)
def wait_refill(self):
self.refill_cmd.send({'Refill': True})
# Wait refill finished
while not self.is_water_full:
logger.debug(
'Now water level is {}, waiting to full'.format(self.is_water_full))
time.sleep(2)
def wait_printer(self, cmd_count):
while not (self.printer_progress == cmd_count):
if self.stop:
break
logger.debug('Now printer progress {}, wait it to {}'.format(
self.printer_progress, cmd_count))
time.sleep(1)
logger.debug('Done printer progress {}, wait it to {}'.format(
self.printer_progress, cmd_count))
self.printer_progress = 0
def wait_printer_finish(self):
self.wait_printer(self.total_cmd)
def wait_printer_operational(self):
while self.printer_state_string is not None and self.printer_state_string != 'Operational':
time.sleep(1)
def wait_printer_printing(self):
while self.printer_state_string is not None and self.printer_state_string != 'Printing':
time.sleep(1)
def go_home(self):
if self.state == self.IDLE:
self.__send_to_printer({'C': 'G28'})
|
loading.py
|
###################
## COLOR SETUP ##
###################
## ANSI COLORS ###
P = '\x1b[1;97m' # WHITE
M = '\x1b[1;91m' # RED
H = '\x1b[1;92m' # GREEN
K = '\x1b[1;93m' # YELLOW
B = '\x1b[1;94m' # BLUE
U = '\x1b[1;95m' # PURPLE
O = '\x1b[1;96m' # LIGHT BLUE
N = '\x1b[0m' # RESET (COLOR OFF)
##################
## ALIASES ###########
inu =print
april_cantik =input
#######################################
## PYTHON LOADING ANIMATION ##
##################################################
## AUTHOR : MR-Xyaa ##
## GITHUB : https://github.com/MRXyaa ##
##################################################
## ##
## ###
### ####
#### #####
##### ######
###### #######
####### ########
######## #########
## ##
## MR Xyaa ##
##################################################
######## ##########
######### ###########
########## ############
########### #############
############ ##############
############# ###############
############## ################
############### #################
################ ##################
### # # ###
## # # ##
## # # ##
## # # ##
## # # ##
## # # ##
## ; # # ##
## # # ##
## IBNU ##
import itertools
import threading
import time
import sys
import os
##################
def inu_april():
os.system('clear')
    inu ('\x1b[1;92mPLEASE BE PATIENT...')
inu ('')
inu_april()
##################
done = False
##################
## LOADING ANIMATION ##
#################
def aprillia():
for c in itertools.cycle(['\x1b[1;93m▓■■■■■■■','\x1b[1;93m■\x1b[1;95m▓■■■■■■','\x1b[1;95m■■\x1b[1;96m▓■■■■■','\x1b[1;96m■■■\x1b[1;94m▓■■■■','\x1b[1;94m■■■■\x1b[1;91m▓■■■','\x1b[1;91m■■■■■\x1b[1;92m▓■■','\x1b[1;92m■■■■■■\x1b[1;91m▓■','\x1b[1;91m■■■■■■■\x1b[1;95m▓','\x1b[1;92m■']):
if done:
break
sys.stdout.write('\r\x1b[1;97mLOADING ' + c)
sys.stdout.flush()
time.sleep(0.1)
    sys.stdout.write('\rSuccess 100%')
inu_sayang_april = threading.Thread(target=aprillia)
inu_sayang_april.start()
#########################
## LONG PAUSE HERE ##
#########################
time.sleep(5)
done = True
#############
## LOGO ##
#############
sayang_april =("""
\x1b[1;91m##################################################
##################################################
######## ##########
######## ##########
######## ##########
######## ##########
######## ##########
## ##
##___________________\x1b[1;92m MR Xyaa\x1b[1;91m___________________##
########## ############
########## ############
\x1b[1;97m########## ############
########## ############
########### #############
############ ##############
############# ###############
############## ################
############### #################
################| |##################
### # | | # ###
## # # ##
## # # ##
## # # ##
## # # ##
## # # ##
## # # ##
## # # ##
## \x1b[1;93mIBNU \x1b[1;97m ##
""")
def april_cantik_banget():
os.system('clear')
april_cantik_banget()
inu (sayang_april)
###################
## JUST LOADING + LOGO ##
###################
|
test__semaphore.py
|
###
# This file is test__semaphore.py only for organization purposes.
# The public API,
# and the *only* correct place to import Semaphore --- even in tests ---
# is ``gevent.lock``, never ``gevent._semaphore``.
##
from __future__ import print_function
from __future__ import absolute_import
import weakref
import gevent
import gevent.exceptions
from gevent.lock import Semaphore
from gevent.lock import BoundedSemaphore
import gevent.testing as greentest
from gevent.testing import timing
class TestSemaphore(greentest.TestCase):
# issue 39
def test_acquire_returns_false_after_timeout(self):
s = Semaphore(value=0)
result = s.acquire(timeout=0.01)
assert result is False, repr(result)
def test_release_twice(self):
s = Semaphore()
result = []
s.rawlink(lambda s: result.append('a'))
s.release()
s.rawlink(lambda s: result.append('b'))
s.release()
gevent.sleep(0.001)
# The order, though, is not guaranteed.
self.assertEqual(sorted(result), ['a', 'b'])
def test_semaphore_weakref(self):
s = Semaphore()
r = weakref.ref(s)
self.assertEqual(s, r())
@greentest.ignores_leakcheck
def test_semaphore_in_class_with_del(self):
# Issue #704. This used to crash the process
# under PyPy through at least 4.0.1 if the Semaphore
# was implemented with Cython.
class X(object):
def __init__(self):
self.s = Semaphore()
def __del__(self):
self.s.acquire()
X()
import gc
gc.collect()
gc.collect()
def test_rawlink_on_unacquired_runs_notifiers(self):
# https://github.com/gevent/gevent/issues/1287
# Rawlinking a ready semaphore should fire immediately,
# not raise LoopExit
s = Semaphore()
gevent.wait([s])
class TestSemaphoreMultiThread(greentest.TestCase):
# Tests that the object can be acquired correctly across
# multiple threads.
# Used as a base class.
# See https://github.com/gevent/gevent/issues/1437
def _getTargetClass(self):
return Semaphore
def _makeOne(self):
# Create an object that is associated with the current hub. If
# we don't do this now, it gets initialized lazily the first
# time it would have to block, which, in the event of threads,
# would be from an arbitrary thread.
return self._getTargetClass()(1)
def _makeThreadMain(self, thread_running, thread_acquired, sem,
acquired, exc_info,
**thread_acquire_kwargs):
from gevent._hub_local import get_hub_if_exists
import sys
def thread_main():
thread_running.set()
try:
acquired.append(
sem.acquire(**thread_acquire_kwargs)
)
except:
exc_info[:] = sys.exc_info()
raise # Print
finally:
hub = get_hub_if_exists()
if hub is not None:
hub.join()
hub.destroy(destroy_loop=True)
thread_acquired.set()
return thread_main
IDLE_ITERATIONS = 5
def _do_test_acquire_in_one_then_another(self,
release=True,
require_thread_acquired_to_finish=False,
**thread_acquire_kwargs):
from gevent import monkey
self.assertFalse(monkey.is_module_patched('threading'))
import threading
thread_running = threading.Event()
thread_acquired = threading.Event()
sem = self._makeOne()
# Make future acquires block
sem.acquire()
exc_info = []
acquired = []
t = threading.Thread(target=self._makeThreadMain(
thread_running, thread_acquired, sem,
acquired, exc_info,
**thread_acquire_kwargs
))
t.daemon = True
t.start()
thread_running.wait(10) # implausibly large time
if release:
sem.release()
# Spin the loop to be sure the release gets through.
# (Release schedules the notifier to run, and when the
# notifier runs it sends the async notification to the
# other thread. Depending on exactly where we are in the
# event loop, and the limit to the number of callbacks
# that get run (including time-based) the notifier may or
# may not be immediately ready to run, so this can take up
# to two iterations.)
for _ in range(self.IDLE_ITERATIONS):
gevent.idle()
if thread_acquired.wait(timing.LARGE_TICK):
break
self.assertEqual(acquired, [True])
if not release and thread_acquire_kwargs.get("timeout"):
# Spin the loop to be sure that the timeout has a chance to
# process. Interleave this with something that drops the GIL
# so the background thread has a chance to notice that.
for _ in range(self.IDLE_ITERATIONS):
gevent.idle()
if thread_acquired.wait(timing.LARGE_TICK):
break
thread_acquired.wait(timing.LARGE_TICK * 5)
if require_thread_acquired_to_finish:
self.assertTrue(thread_acquired.is_set())
try:
self.assertEqual(exc_info, [])
finally:
exc_info = None
return sem, acquired
def test_acquire_in_one_then_another(self):
self._do_test_acquire_in_one_then_another(release=True)
def test_acquire_in_one_then_another_timed(self):
sem, acquired_in_thread = self._do_test_acquire_in_one_then_another(
release=False,
require_thread_acquired_to_finish=True,
timeout=timing.SMALLEST_RELIABLE_DELAY)
self.assertEqual([False], acquired_in_thread)
# This doesn't, of course, notify anything, because
# the waiter has given up.
sem.release()
notifier = getattr(sem, '_notifier', None)
self.assertIsNone(notifier)
def test_acquire_in_one_wait_greenlet_wait_thread_gives_up(self):
# The waiter in the thread both arrives and gives up while
# the notifier is already running...or at least, that's what
# we'd like to arrange, but the _notify_links function doesn't
# drop the GIL/object lock, so the other thread is stuck and doesn't
# actually get to call into the acquire method.
from gevent import monkey
self.assertFalse(monkey.is_module_patched('threading'))
import threading
sem = self._makeOne()
# Make future acquires block
sem.acquire()
def greenlet_one():
ack = sem.acquire()
# We're running in the notifier function right now. It switched to
# us.
thread.start()
gevent.sleep(timing.LARGE_TICK)
return ack
exc_info = []
acquired = []
glet = gevent.spawn(greenlet_one)
thread = threading.Thread(target=self._makeThreadMain(
threading.Event(), threading.Event(),
sem,
acquired, exc_info,
timeout=timing.LARGE_TICK
))
thread.daemon = True
gevent.idle()
sem.release()
glet.join()
for _ in range(3):
gevent.idle()
thread.join(timing.LARGE_TICK)
self.assertEqual(glet.value, True)
self.assertEqual([], exc_info)
self.assertEqual([False], acquired)
self.assertTrue(glet.dead, glet)
glet = None
def assertOneHasNoHub(self, sem):
self.assertIsNone(sem.hub, sem)
@greentest.skipOnPyPyOnWindows("Flaky there; can't reproduce elsewhere")
def test_dueling_threads(self, acquire_args=(), create_hub=None):
# pylint:disable=too-many-locals,too-many-statements
# Threads doing nothing but acquiring and releasing locks, without
# having any other greenlets to switch to.
# https://github.com/gevent/gevent/issues/1698
from gevent import monkey
from gevent._hub_local import get_hub_if_exists
self.assertFalse(monkey.is_module_patched('threading'))
import threading
from time import sleep as native_sleep
sem = self._makeOne()
self.assertOneHasNoHub(sem)
count = 10000
results = [-1, -1]
run = True
def do_it(ix):
if create_hub:
gevent.get_hub()
try:
for i in range(count):
if not run:
break
acquired = sem.acquire(*acquire_args)
assert acquire_args or acquired
if acquired:
sem.release()
results[ix] = i
if not create_hub:
# We don't artificially create the hub.
self.assertIsNone(
get_hub_if_exists(),
(get_hub_if_exists(), ix, i)
)
if create_hub and i % 10 == 0:
gevent.sleep(timing.SMALLEST_RELIABLE_DELAY)
elif i % 100 == 0:
native_sleep(timing.SMALLEST_RELIABLE_DELAY)
except Exception as ex: # pylint:disable=broad-except
import traceback; traceback.print_exc()
results[ix] = str(ex)
ex = None
finally:
hub = get_hub_if_exists()
if hub is not None:
hub.join()
hub.destroy(destroy_loop=True)
t1 = threading.Thread(target=do_it, args=(0,))
t1.daemon = True
t2 = threading.Thread(target=do_it, args=(1,))
t2.daemon = True
t1.start()
t2.start()
t1.join(1)
t2.join(1)
while t1.is_alive() or t2.is_alive():
cur = list(results)
t1.join(7)
t2.join(7)
if cur == results:
# Hmm, after two seconds, no progress
run = False
break
self.assertEqual(results, [count - 1, count - 1])
def test_dueling_threads_timeout(self):
self.test_dueling_threads((True, 4))
def test_dueling_threads_with_hub(self):
self.test_dueling_threads(create_hub=True)
# XXX: Need a test with multiple greenlets in a non-primary
# thread. Things should work, just very slowly; instead of moving through
# greenlet.switch(), they'll be moving with async watchers.
class TestBoundedSemaphoreMultiThread(TestSemaphoreMultiThread):
def _getTargetClass(self):
return BoundedSemaphore
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase):
def test_c_extension(self):
self.assertEqual(Semaphore.__module__,
'gevent._gevent_c_semaphore')
class SwitchWithFixedHash(object):
# Replaces greenlet.switch with a callable object
# with a hash code we control. This only matters if
# we're hashing this somewhere (which we used to), but
# that doesn't preserve order, so we don't do
# that anymore.
def __init__(self, greenlet, hashcode):
self.switch = greenlet.switch
self.hashcode = hashcode
def __hash__(self):
raise AssertionError
def __eq__(self, other):
raise AssertionError
def __call__(self, *args, **kwargs):
return self.switch(*args, **kwargs)
def __repr__(self):
return repr(self.switch)
class FirstG(gevent.Greenlet):
# A greenlet whose switch method will have a low hashcode.
hashcode = 10
def __init__(self, *args, **kwargs):
gevent.Greenlet.__init__(self, *args, **kwargs)
self.switch = SwitchWithFixedHash(self, self.hashcode)
class LastG(FirstG):
# A greenlet whose switch method will have a high hashcode.
hashcode = 12
def acquire_then_exit(sem, should_quit):
sem.acquire()
should_quit.append(True)
def acquire_then_spawn(sem, should_quit):
if should_quit:
return
sem.acquire()
g = FirstG.spawn(release_then_spawn, sem, should_quit)
g.join()
def release_then_spawn(sem, should_quit):
sem.release()
if should_quit: # pragma: no cover
return
g = FirstG.spawn(acquire_then_spawn, sem, should_quit)
g.join()
class TestSemaphoreFair(greentest.TestCase):
def test_fair_or_hangs(self):
# If the lock isn't fair, this hangs, spinning between
# the last two greenlets.
# See https://github.com/gevent/gevent/issues/1487
sem = Semaphore()
should_quit = []
keep_going1 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
keep_going2 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
exiting = LastG.spawn(acquire_then_exit, sem, should_quit)
with self.assertRaises(gevent.exceptions.LoopExit):
gevent.joinall([keep_going1, keep_going2, exiting])
self.assertTrue(exiting.dead, exiting)
self.assertTrue(keep_going2.dead, keep_going2)
self.assertFalse(keep_going1.dead, keep_going1)
sem.release()
keep_going1.kill()
keep_going2.kill()
exiting.kill()
gevent.idle()
if __name__ == '__main__':
greentest.main()
|
mywindows.py
|
import Tkinter as tk
import fileinput
from PIL import Image, ImageTk
import pyaudio
import wave
import threading
import socket
import client as cl
fname = 'records/' # base path used when building the record file name
chosen_var = '1' # default variant is '1'
timer_listsec = [5, 90, 90] # the timer has three states ('be ready', 'preparing', 'recording'); each element is the number of seconds shown in that state
c = 0 # relative counter: which of the three states the timer is in for the current task
total = 0 # absolute counter of state changes across the whole session
timer_running = False # whether the timer is currently ticking
default_seconds = timer_listsec[c] # seconds for the first timer state ('be ready')
timer_seconds = default_seconds
timer_labels = ["Be ready for task", "Preparing...", "Recording..."] # parallels timer_listsec, but holds the label shown below the timer for each state
flag_next = True # flag that controls switching to the next task
flag_voice = True # flag that allows recording
numbers = { # a workaround for task switching: maps values of "total" to the task number whose text should fill the textbox
1: '1',
4: '2',
7: '3',
10: '4'
}
def exitAll(root):
root.destroy()
def openErr(root, error):
errform = tk.Toplevel(root)
errform.resizable(0, 0)
errform.geometry("300x90")
errform.transient(root)
errlbl = tk.Label(errform, text="Error/Warning: " + error + "\nPlease complete all instructions", font="7")
errbtn = tk.Button(errform, text="Ok!", width=5, height=1, command=errform.destroy)
errlbl.place(x=10, y=20)
errbtn.place(x=15, y=60)
#
#Tasks section
#
def voiceRecorder(suff): #source: 'http://sharewebegin.blogspot.ru/2013/07/record-from-mic-python.html'
global flag_voice
flag_voice = True
CHUNK = 1024
FORMAT = pyaudio.paInt16 #paInt8
CHANNELS = 2
RATE = 44100 #sample rate
WAVE_OUTPUT_FILENAME = fname + suff + ".wav"
RECORD_SECONDS = timer_listsec[2]
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK) #buffer
print("* recording")
frames = []
i = 0
while flag_voice and i < int(RATE / CHUNK * RECORD_SECONDS):
data = stream.read(CHUNK)
frames.append(data) # 2 bytes(16 bits) per channel
i += 1
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def clearTextbox(txt):
txt['state'] = tk.NORMAL
txt.delete('1.0', tk.END)
txt['state'] = tk.DISABLED
def openTask(num, txt):
txt['state'] = tk.NORMAL
txtname = 'variants/' + chosen_var + '/' + num + '.txt'
imgname = 'variants/' + chosen_var + '/' + num + '.jpg'
for i in fileinput.input(txtname):
txt.insert(tk.END, i)
fimg = Image.open(imgname)
img = ImageTk.PhotoImage(fimg)
txt.image_create(tk.END, image=img)
txt.image = img
txt['state'] = tk.DISABLED
#
#Timer section
#
def timer_start_pause(root, tmr, label, txt):
global timer_running
timer_running = not timer_running
if timer_running:
timer_tick(root, tmr, label, txt)
def timer_reset(tmr, label):
global timer_running, timer_seconds
timer_running = False
timer_seconds = timer_listsec[c]
label.config(text=timer_labels[c])
show_timer(tmr)
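# How the timer state machine below works (editor's summary): timer_tick()
# re-schedules itself every second while the current state still has seconds
# left. When a state runs out, c (state within a task) and total (overall step
# counter) advance; entering state 1 opens the next task text and entering
# state 2 starts an audio recording in a background thread. After state 2 the
# counters reset for the next task, and when total reaches 11 the app exits.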
def timer_tick(root, tmr, label, txt): # the most meaningful function (be really careful here)
global timer_seconds, c, total, flag_next
t = threading.Thread(target=lambda:voiceRecorder(numbers[total - 1])) # recording in another thread
if timer_running and timer_seconds and flag_next:
tmr.after(1000, lambda:timer_tick(root, tmr, label, txt))
timer_seconds -= 1
show_timer(tmr)
elif (c < 2) and (total < 11): # change to next condition
flag_next = True
c += 1
total += 1
timer_reset(tmr, label)
timer_start_pause(root, tmr, label, txt)
# depending on the relative state counter we either open the next task or start recording
if c == 1:
openTask(numbers[total], txt) # opens next task
if c == 2:
t.start() # starts recording
elif (c == 2) and (total < 11): # zero relative counter of conditions. Go to next iteration of conditions
flag_next = True
clearTextbox(txt)
c = 0
total += 1
timer_reset(tmr, label)
timer_start_pause(root, tmr, label, txt)
elif total == 11: # in the end just switching off
exitAll(root)
def show_timer(tmr):
m = timer_seconds // 60
s = timer_seconds - m * 60
tmr['text'] = '%02d:%02d' % (m, s)
#
#Main section
#
def nextTask():
global flag_next, flag_voice
flag_next = False
flag_voice = False
def showMain(root):
root.deiconify()
root['bg'] = "#ffffff"
root.attributes('-fullscreen', 1)
root.focus_force()
root.overrideredirect(1)
label1 = tk.Label(root, text="Your task is:")
label1['bg'] = "#ffffff"
#label1.place(x=50, y=10)
label1.grid(row=0, column=0)
txt1 = tk.Text(master=root, state=tk.DISABLED, font='Monospace 16', width=int(root.winfo_screenwidth())//15, height=int(root.winfo_screenheight())//27, wrap=tk.WORD)
txt1['bg'] = "#ffc299"
#txt1 = tk.Text(master=root, font='Monospace 16', width=122, height=40, wrap=tk.WORD)
#txt1.place(x=50, y=30)
txt1.grid(row=1, column=0)
tmr = tk.Label(root, font='Monospace 30')
tmr['bg'] = "#ffffff"
#tmr.place(x=1600, y=30)
tmr.grid(row=0, column=1)
show_timer(tmr)
label2 = tk.Label(root, text=timer_labels[c], font='18')
label2['bg'] = "#ffffff"
#label2.place(x=1600, y=90)
label2.grid(row=1, column=1, columnspan=2, sticky=tk.N)
timer_start_pause(root, tmr, label2, txt1)
btn = tk.Button(root, state=tk.NORMAL, text="Next task", width=15, height=3, command=nextTask)
btn['bg'] = "#ff6600"
#btn.pack(side="right")
btn.grid(row=1, column=3, columnspan=4, rowspan=2, sticky=tk.W+tk.S)
#
#Variant section
#
def gotoMain(root, varform):
varform.destroy()
showMain(root)
def select_item(event, arg):
value = (arg.get(arg.curselection()))
global chosen_var
chosen_var = value[-1]
def get_items():
items = []
flist = open("info_vars.txt", "r")
for line in flist:
items.append("var " + line)
return items
def showVariant(root):
varform = tk.Toplevel(root)
varform['bg'] = "#ffffff"
varform.wm_attributes("-type", "splash")
varform.geometry('400x180+%d+%d' % (root.winfo_screenwidth()//2-200, root.winfo_screenheight()//2-90))
varform.title("Audream v0.9")
varform.focus_force()
listbox = tk.Listbox(varform, width=10, height=2, font=('13'))
listbox['bg'] = "#ffc299"
listbox.bind('<<ListboxSelect>>', lambda event, arg=listbox:select_item(event, arg))
listbox.place(x=150, y=20)
listbox_items = get_items()
for item in listbox_items:
listbox.insert(tk.END, item)
btn1 = tk.Button(varform, text="Continue", width=15, height=3, command=lambda:gotoMain(root, varform))
btn1['bg'] = "#ff6600"
btn1.place(x=50, y=80)
btn2 = tk.Button(varform, text="Exit", width=15, height=3, command=lambda:exitAll(root))
btn2['bg'] = "#ff6600"
btn2.place(x=200, y=80)
#
#Registration section
#
def gotoVar(root, regform, e1, e2):
global fname
s = e1.get().split(' ')
s1 = e2.get()
if (len(s) < 2) or (s1 == ''): # the name entry must contain both first and last name
openErr(regform, "entry field is empty")
return
fname += s1 + s[0] + s[1]
regform.destroy()
showVariant(root)
def showReg(root):
regform = tk.Toplevel(root)
regform.wm_attributes("-type", "splash")
regform.geometry('400x180+%d+%d' % (root.winfo_screenwidth()//2-200, root.winfo_screenheight()//2-90))
regform.title("Audream v0.9")
regform['bg'] = "#ffffff"
#regform.winfo_toplevel().title
label1 = tk.Label(regform, text="Enter your first and last name")
label1.place(x=50, y=10)
label1['bg'] = "#ffffff"
e1 = tk.Entry(regform)
e1.place(x=225, y=10)
e1.focus_set()
label2 = tk.Label(regform, text="Enter the number of your group")
label2.place(x=50, y=40)
label2['bg'] = "#ffffff"
e2 = tk.Entry(regform)
e2.place(x=225, y=40)
btn1 = tk.Button(regform, text="Continue", width=15, height=3, command=lambda:gotoVar(root, regform, e1, e2))
btn1['bg'] = "#ff6600"
btn1.place(x=50, y=80)
btn2 = tk.Button(regform, text="Exit", width=15, height=3, command=lambda:exitAll(root))
btn2['bg'] = "#ff6600"
btn2.place(x=200, y=80)
#
#Advert section
#
def gotoReg(root, advform):
advform.destroy()
showReg(root)
def setConnection(advform):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
try:
s.connect(("192.168.43.110",9999))
except socket.error:
openErr(advform, "no connection with server")
else:
code = cl.update_base(s)
s.close()
if code:
openErr(advform, "connection was lost before updating")
def showAdv(root):
advform = tk.Toplevel(root)
advform.geometry('408x400+%d+%d' % (root.winfo_screenwidth()//2-204, root.winfo_screenheight()//2-200))
advform.overrideredirect(1)
txt = tk.Text(advform, width=408, height=400)
txt.pack()
imgname = "./logo.png"
fimg = Image.open(imgname)
img = ImageTk.PhotoImage(fimg)
txt.image_create(tk.END, image=img)
txt.image = img
advform.after(5000, lambda:gotoReg(root, advform))
threading.Thread(target=lambda:setConnection(advform)).start()
|
test_gc.py
|
import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads,
temp_dir, requires_type_collecting)
from test.support.script_helper import assert_python_ok, make_script
import sys
import time
import gc
import weakref
import threading
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection; make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection; make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
@requires_type_collecting
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
lcdDisplay.py
|
#!/usr/bin/env python
import lcddriver
from time import *
import pika
import sys
from threading import Thread
import threading
import time
import logging
import os
lcd = None
rabbitMqHost = os.environ['RABBIT_MQ_HOST']
rabbitMqQueue = os.environ['RABBIT_MQ_QUEUE']
demoMode = eval(os.environ.get('DEMO_MODE', 'False')) # default must be a string: eval(False) raises TypeError
rowData = [" "," "," "," "," "]
connection = None # placeholder; a BlockingConnection is created in openConnection()
# -------------------------------------------------------------------------------------------------------
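# writeDataToDisplay runs in its own thread: every 10 seconds it clears the LCD
# (or, in demo mode, just logs) and re-writes the current contents of rowData.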
def writeDataToDisplay():
global lcd
global rowData
global demoMode
logging.info("writeDataToDisplay Thread startet...")
ct = threading.currentThread()
lcd = lcddriver.lcd()
while getattr(ct, "do_checking", True):
if not demoMode:
lcd.lcd_clear()
else:
logging.info("----------------------------------")
rowCount = 0
for row in rowData:
if not demoMode:
lcd.lcd_display_string(row, rowCount)
else:
logging.info("Write: " + row + " on LCD row:" + str(rowCount))
rowCount +=1
time.sleep(10)
# -------------------------------------------------------------------------------------------------------
def openConnection():
global connection
global rabbitMqHost
global rabbitMqQueue
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitMqHost))
channel = connection.channel()
channel.basic_consume(queue=rabbitMqQueue,on_message_callback=callback)
logging.info("Waiting for Messages on Queue:" + rabbitMqQueue)
channel.start_consuming()
# -------------------------------------------------------------------------------------------------------
def closeConnection():
global connection
connection.close()
# -------------------------------------------------------------------------------------------------------
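# callback handles one RabbitMQ message; the body is expected to look like
# "ROW=DATA", e.g. "2=Hello world", where ROW is the rowData index to update.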
def callback(ch, method, properties, body):
global rabbitMqQueue
global rowData
try:
ch.basic_ack(delivery_tag=method.delivery_tag)
logging.debug("Message arrived")
stringBody = "".join(map(chr, body))
if stringBody.find("=") != -1:
if demoMode:
logging.info("Rceived:" + stringBody)
row = stringBody[0:stringBody.find("=")]
rowData[int(row)] = stringBody[stringBody.find("=")+1:]
else:
logging.info("Message on Channel: " + rabbitMqQueue +
" has an unexpectedly Format -> expecting ROW=DATA")
except BaseException as error:
logging.error(error)
# -------------------------------------------------------------------------------------------------------
def main():
global lcd
logging.basicConfig(level=logging.INFO)
logging.info("---------------------------------------------")
rowData[0] = " "
rowData[1] = os.environ.get('WELCOME_TEXT', "booting...")
rowData[2] = " "
rowData[3] = " "
if not demoMode:
logging.info("LCD initialised...")
lcd = lcddriver.lcd()
lcd.lcd_clear()
# Start the output thread as a daemon; joining before start() would raise an
# error, and openConnection() below blocks while consuming messages anyway.
outputThread = Thread(target=writeDataToDisplay)
outputThread.daemon = True
outputThread.start()
else:
logging.info("lcdDisplay Connector Started in DEMO MODE")
openConnection()
logging.info('Finished')
closeConnection()
logging.info("---------------------------------------------")
if __name__ == '__main__':
try:
main()
except:
logging.info("---------------------------------------------")
logging.info("-- CRITICAL ERROR OCCURED...")
logging.info("---------------------------------------------")
sys.exit(2)
|
ssh_cpython_backend.py
|
"""Intermediate process for communicating with the remote Python via SSH"""
import os.path
import sys
import threading
from threading import Thread
import thonny
from thonny.backend import (
SshMixin,
BaseBackend,
interrupt_local_process,
RemoteProcess,
ensure_posix_directory,
)
from thonny.common import (
serialize_message,
ImmediateCommand,
EOFCommand,
InputSubmission,
CommandToBackend,
MessageFromBackend,
)
class SshCPythonBackend(BaseBackend, SshMixin):
def __init__(self, host, user, password, interpreter, cwd):
SshMixin.__init__(self, host, user, password, interpreter, cwd)
self._upload_main_backend()
self._proc = self._start_main_backend()
self._main_backend_is_fresh = True
self._response_lock = threading.Lock()
self._start_response_forwarder()
BaseBackend.__init__(self)
def _handle_eof_command(self, msg: EOFCommand) -> None:
self._forward_incoming_command(msg)
def _handle_user_input(self, msg: InputSubmission) -> None:
self._forward_incoming_command(msg)
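# Command routing (editor's summary of the method below): commands whose name
# starts with an uppercase letter cause the remote backend to be restarted
# (after updating the expected cwd); commands with a local _cmd_* handler (the
# SFTP helpers from SshMixin) are answered here; everything else is forwarded
# to the remote CPython backend process.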
def _handle_normal_command(self, cmd: CommandToBackend) -> None:
if cmd.name[0].isupper():
if "expected_cwd" in cmd:
self._cwd = cmd["expected_cwd"]
self._restart_main_backend()
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is not None:
# SFTP methods defined in SshMixin
try:
response = handler(cmd)
except Exception as e:
response = {"error": str(e)} # TODO:
self.send_message(self._prepare_command_response(response, cmd))
else:
# other methods running in the remote process
self._forward_incoming_command(cmd)
def _handle_immediate_command(self, cmd: ImmediateCommand) -> None:
SshMixin._handle_immediate_command(self, cmd)
# It is possible that there is a command being executed both in the local and remote process,
# interrupt them both
with self._interrupt_lock:
interrupt_local_process()
self._proc.stdin.write("\x03")
def send_message(self, msg: MessageFromBackend) -> None:
with self._response_lock:
super().send_message(msg)
def _forward_incoming_command(self, msg):
msg_str = serialize_message(msg, 1024)
for line in msg_str.splitlines(keepends=True):
self._proc.stdin.write(line)
self._proc.stdin.flush()
self._proc.stdin.write("\n")
def _start_response_forwarder(self):
self._response_forwarder = Thread(target=self._forward_main_responses, daemon=True)
self._response_forwarder.start()
def _forward_main_responses(self):
while self._should_keep_going():
line = self._proc.stdout.readline()
if self._main_backend_is_fresh and self._looks_like_echo(line):
# In the beginning the backend may echo commands sent to it (perhaps this echo-avoiding trick
# takes time). Don't forward those lines.
continue
if not line:
break
with self._response_lock:
sys.stdout.write(line)
sys.stdout.flush()
self._main_backend_is_fresh = False
def _looks_like_echo(self, line):
return line.startswith("^B")
def _should_keep_going(self) -> bool:
return self._proc is not None and self._proc.poll() is None
def _start_main_backend(self) -> RemoteProcess:
env = {"THONNY_USER_DIR": "~/.config/Thonny", "THONNY_FRONTEND_SYS_PATH": "[]"}
self._main_backend_is_fresh = True
return self._create_remote_process(
[self._remote_interpreter, "-m", "thonny.plugins.cpython_backend", self._cwd],
cwd=self._get_remote_program_directory(),
env=env,
)
def _restart_main_backend(self):
self._proc.kill()
self._proc = None
self._response_forwarder.join()
self._proc = self._start_main_backend()
self._start_response_forwarder()
def _get_remote_program_directory(self):
return "/tmp/thonny-backend-" + thonny.get_version()
def _upload_main_backend(self):
launch_dir = self._get_remote_program_directory()
if self._get_stat_mode_for_upload(launch_dir) and not launch_dir.endswith("-dev"):
# don't overwrite unless in dev mode
return
ensure_posix_directory(
launch_dir + "/thonny/plugins/cpython_backend",
self._get_stat_mode_for_upload,
self._mkdir_for_upload,
)
import thonny.ast_utils
import thonny.jedi_utils
import thonny.backend
import thonny.common
import thonny.plugins.cpython_backend
local_context = os.path.dirname(os.path.dirname(thonny.__file__))
for local_path in [
thonny.__file__,
thonny.common.__file__,
thonny.ast_utils.__file__,
thonny.jedi_utils.__file__,
thonny.backend.__file__,
thonny.plugins.cpython_backend.__file__,
thonny.plugins.cpython_backend.__file__.replace("__init__", "__main__"),
]:
local_suffix = local_path[len(local_context) :]
remote_path = launch_dir + local_suffix.replace("\\", "/")
self._perform_sftp_operation_with_retry(lambda sftp: sftp.put(local_path, remote_path))
def create_empty_cpython_init(sftp):
# Reconstructed from a garbled original; the exact remote target is an
# assumption: an empty __init__.py so the uploaded package tree is importable.
with sftp.open(launch_dir + "/thonny/plugins/__init__.py", "w") as fp:
fp.write("")
self._perform_sftp_operation_with_retry(create_empty_cpython_init)
|
app.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Shanda Lau 刘祥德
@license: (C) Copyright 2019-now, Node Supply Chain Manager Corporation Limited.
@contact: shandalaulv@gmail.com
@software:
@file: __init__.py.py
@time: 2020/8/8 16:11
@version 1.0
@desc:
"""
from workers import create_mast_worker, create_dist_worker, create_cast_worker
from sockets import socketio
from config import Config
from api import blueprint as api
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_cors import CORS
from flask import Flask
import argparse
import eventlet
eventlet.monkey_patch(os=False, thread=False)
def create_app():
flask = Flask(__name__,
static_url_path='',
static_folder='dist')
# flask.config['SECRET_KEY'] = 'secret!'
# mount all blueprints from api module.
flask.wsgi_app = ProxyFix(flask.wsgi_app)
flask.register_blueprint(api)
socketio.init_app(
flask, message_queue=Config.REDIS_SOCKET_URL, cors_allowed_origins="*")
CORS(flask)
return flask
def register_model_workers():
"""create daemon process for each model while persistent service need be running in the backend.
This function will create model process handlers and destroy events.
The destroy event is a multiprocessing event, which is to notify model process terminated running loop and exits from service.
The worker handler is a thread future, which monitors daemon thread status. The handler will be
blocked when wait() is called until the daemon process exit. After daemon thread exits from
watching, the handler joins instantly, system recycle resource.
Returns:
[type]: [description]
"""
handlers = []
destroy_events = []
mast_worker_handler, mast_destroy = create_mast_worker()
cast_worker_handler, cast_destroy = create_cast_worker()
dist_worker_handler, dist_destroy = create_dist_worker()
handlers.append(mast_worker_handler)
handlers.append(cast_worker_handler)
handlers.append(dist_worker_handler)
destroy_events.append(mast_destroy)
destroy_events.append(cast_destroy)
destroy_events.append(dist_destroy)
return handlers, destroy_events
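# Shutdown is the mirror image of registration: set every destroy event so the
# workers leave their run loops, then join the handlers (wait_model_workers_exit).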
def wait_model_workers_exit(handlers, destroy_events):
for d in destroy_events:
d.set()
for h in handlers:
h.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, default='0.0.0.0',
help='ip address of flask server in local network.')
parser.add_argument('--port', type=int, default=5000,
help='listening port of flask server in local network.')
parser.add_argument('--debug', action='store_true',
help='run the flask server in debug mode.')
args = parser.parse_args()
# mast_server = MastServer()
# Process(target=mast_server.run, args=(send_queue, res_queue,)).start()
app = create_app()
handlers, events = register_model_workers()
socketio.run(app=app, host=args.host, port=args.port, debug=args.debug)
wait_model_workers_exit(handlers, events)
# logger = logging.getLogger('gunicorn.error')
# app.logger.handlers = logger.handlers
# app.logger.setLevel(logger.level)
# @app.route('/', defaults={'path': ''})
# @app.route('/<path:path>')
# def index(path):
# if app.debug:
# return requests.get('http://frontend:8080/{}'.format(path)).text
# return app.send_static_file('index.html')
# flask app is recommended to be incorporated with gunicorn framework in production environment.
# gunicorn -c webserver/gunicorn_config.py webserver:app --no-sendfile
# but if you are testing in development environment, execute app.run() should be fine.
# app.run()
|
Code-07-Advance_Official_ThreadName_ParllelExec.py
|
'''
DEVELOPER NAME : BALAVIGNESH.M
IMPLEMENTED DATE: 16-11-2018
'''
import threading
import time
class serviceThread:
@staticmethod
def serviceThreadName():
print(threading.currentThread().getName(),'Starting....')
time.sleep(2)
print(threading.currentThread().getName(),'Exiting....')
class securityThread:
@staticmethod
def securityThreadName():
print(threading.currentThread().getName(),'Starting....')
time.sleep(2)
print(threading.currentThread().getName(),'Exiting....')
service = serviceThread()
security = securityThread()
tim = threading.Thread(target=service.serviceThreadName)
service_thread = threading.Thread(name="ServiceThread",target=service.serviceThreadName)
security_thread = threading.Thread(name="securitythread",target=security.securityThreadName)
service_thread.start()
security_thread.start()
tim.start()
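# Optionally, wait for all three threads to finish before the interpreter exits
# (not in the original example; added here only to show a complete join pattern):
service_thread.join()
security_thread.join()
tim.join()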
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import sys
from threading import Lock
from tempfile import NamedTemporaryFile
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = Lock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
self._conf = conf or SparkConf(_jvm=self._jvm)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
# disable randomness of hash of string in worker, if this is not
# launched by spark-submit
self.environment["PYTHONHASHSEED"] = "0"
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jsc.accumulator(
self._jvm.java.util.ArrayList(),
self._jvm.PythonAccumulatorParam(host, port))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = set()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway()
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transforamtion. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
self._jsc.stop()
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Calling the Java parallelize() method with an ArrayList is too slow,
# because it sends O(n) Py4J commands. As an alternative, serialized
# objects are written to a file and loaded through textFile().
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
serializer.dump_stream(c, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
return RDD(jrdd, self, serializer)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
NOTE: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
        Note: Small files are preferred; large files are also allowed, but they
        may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
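    # A hypothetical usage sketch (not part of the original source; the path and Writable
    # classes below are illustrative only):
    #   rdd = sc.sequenceFile("hdfs:///data/events.seq",
    #                         keyClass="org.apache.hadoop.io.Text",
    #                         valueClass="org.apache.hadoop.io.LongWritable")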
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
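    # A minimal usage sketch (not part of the original source; assumes an active SparkContext `sc`):
    #   lookup = sc.broadcast({"a": 1, "b": 2})
    #   sc.parallelize(["a", "b"]).map(lambda k: lookup.value[k]).collect()  # -> [1, 2]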
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
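    # A minimal usage sketch (not part of the original source; assumes an active SparkContext `sc`):
    #   counter = sc.accumulator(0)
    #   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: counter.add(x))
    #   counter.value  # -> 10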
def addFile(self, path):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path)
def clearFiles(self):
"""
Clear the job's list of files added by L{addFile} or L{addPyFile} so
that they do not get downloaded to any new nodes.
"""
# TODO: remove added .py or .zip files from the PYTHONPATH?
self._jsc.sc().clearFiles()
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
        >>> suppress = threading.Thread(target=stop_job).start()
        >>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions,
allowLocal)
return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
self.profiler_collector.show_profiles()
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
self.profiler_collector.dump_profiles(path)
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
sudos.py
|
import asyncio
import html
import io
import os
import re
import sys
import traceback
import humanfriendly
import time
from contextlib import redirect_stdout
from typing import Union
import speedtest
from meval import meval
from pyrogram import (
Client,
filters,
__version__ as pyrogram_version,
)
from pyrogram.errors import RPCError
from pyrogram.raw.all import (
layer as pyrogram_layer,
)
from pyrogram.types import Message
from alisu import (
__version__ as alisu_version,
)
from alisu.database import (
groups,
users,
notes,
filters as dbfilters,
)
from alisu.utils import sudofilter
from alisu.utils.localization import use_chat_lang
from threading import Thread
prefix: Union[list, str] = "!"
async def restartbot(c: Client):
await c.stop()
args = [sys.executable, "-m", "alisu"]
os.execv(sys.executable, args)
@Client.on_message(filters.command("sudos", prefix) & sudofilter)
async def sudos(c: Client, m: Message):
await m.reply_text("Test")
@Client.on_message(filters.command("cmd", prefix) & sudofilter)
@use_chat_lang()
async def run_cmd(c: Client, m: Message, strings):
cmd = m.text.split(maxsplit=1)[1]
if re.match("(?i)poweroff|halt|shutdown|reboot", cmd):
res = strings("forbidden_command")
else:
proc = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await proc.communicate()
res = (
"<b>Output:</b>\n<code>{}</code>".format(
html.escape(stdout.decode().strip())
)
if stdout
else ""
) + (
"\n<b>Errors:</b>\n<code>{}</code>".format(
html.escape(stderr.decode().strip())
)
if stderr
else ""
)
await m.reply_text(res)
@Client.on_message(filters.command("upgrade", prefix) & sudofilter)
@use_chat_lang()
async def upgrade(c: Client, m: Message, strings):
sm = await m.reply_text("Upgrading sources...")
proc = await asyncio.create_subprocess_shell(
"git pull --no-edit",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
stdout = (await proc.communicate())[0]
if proc.returncode == 0:
if "Already up to date." in stdout.decode():
await sm.edit_text("There's nothing to upgrade.")
else:
await sm.edit_text(strings("restarting"))
Thread(target=asyncio.run, args=(restartbot(c),)).start()
await sm.edit_text("done")
else:
await sm.edit_text(
f"Upgrade failed (process exited with {proc.returncode}):\n{stdout.decode()}"
)
proc = await asyncio.create_subprocess_shell("git merge --abort")
await proc.communicate()
@Client.on_message(filters.command("eval", prefix) & sudofilter)
async def evals(c: Client, m: Message):
text = m.text.split(maxsplit=1)[1]
try:
res = await meval(text, globals(), **locals())
except: # skipcq
ev = traceback.format_exc()
output_eval_one = f"<code>{html.escape(ev)}</code>"
if len(output_eval_one) > c.tg_max_text_msg_len:
bio = io.BytesIO(str(ev).encode())
bio.name: str = "eval.txt"
await m.reply_document(bio)
else:
await m.reply_text(output_eval_one)
else:
try:
output_eval_msg_two_txt = f"<code>{html.escape(str(res))}</code>"
if len(output_eval_msg_two_txt) > c.tg_max_text_msg_len:
bio = io.BytesIO(str(res).encode())
bio.name: str = "eval.txt"
await m.reply_document(bio)
else:
await m.reply_text(output_eval_msg_two_txt)
except Exception as e: # skipcq
output_eval_e = str(e)
if len(output_eval_e) > c.tg_max_text_msg_len:
bio = io.BytesIO(str(output_eval_e).encode())
bio.name: str = "eval.txt"
await m.reply_document(bio)
else:
await m.reply_text(output_eval_e)
@Client.on_message(filters.command("exec", prefix) & sudofilter)
async def execs(c: Client, m: Message):
strio = io.StringIO()
code = m.text.split(maxsplit=1)[1]
exec(
"async def __ex(c, m): " + " ".join("\n " + l for l in code.split("\n"))
) # skipcq: PYL-W0122
with redirect_stdout(strio):
try:
await locals()["__ex"](c, m)
except: # skipcq
trace_format_exc_exec_cmd = traceback.format_exc()
msg_out_one = html.escape(trace_format_exc_exec_cmd)
if len(msg_out_one) <= c.tg_max_text_msg_len:
return await m.reply_text(msg_out_one)
bio = io.BytesIO(str(trace_format_exc_exec_cmd).encode())
bio.name: str = "exec.txt"
return await m.reply_document(bio)
if strio.getvalue().strip():
out_one = strio.getvalue()
out = f"<code>{html.escape(out_one)}</code>"
else:
out_one = "Command executed."
out = "Command executed."
if len(out) > c.tg_max_text_msg_len:
bio = io.BytesIO(str(out_one).encode())
bio.name: str = "exec.txt"
await m.reply_document(bio)
else:
await m.reply_text(out)
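# A hypothetical usage sketch of the sudo commands defined above, typed in chat by a sudo
# account (illustrative only; output depends on the host):
#   !cmd uptime
#   !eval 2 + 2
#   !exec print("hello")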
@Client.on_message(filters.command("speedtest", prefix) & sudofilter)
@use_chat_lang()
async def test_speed(c: Client, m: Message, strings):
string = strings("speedtest")
sent = await m.reply_text(string.format(host="", ping="", download="", upload=""))
s = speedtest.Speedtest()
bs = s.get_best_server()
await sent.edit_text(
string.format(
host=bs["sponsor"], ping=int(bs["latency"]), download="", upload=""
)
)
dl = round(s.download() / 1024 / 1024, 2)
await sent.edit_text(
string.format(
host=bs["sponsor"], ping=int(bs["latency"]), download=dl, upload=""
)
)
ul = round(s.upload() / 1024 / 1024, 2)
await sent.edit_text(
string.format(
host=bs["sponsor"], ping=int(bs["latency"]), download=dl, upload=ul
)
)
@Client.on_message(filters.command("restart", prefix) & sudofilter)
@use_chat_lang()
async def restart(c: Client, m: Message, strings):
sent = await m.reply_text(strings("restarting"))
Thread(target=asyncio.run, args=(restartbot(c),)).start()
await sent.edit_text("done")
@Client.on_message(filters.command("leave", prefix) & sudofilter)
async def leave_chat(c: Client, m: Message):
if len(m.command) == 1:
try:
await c.leave_chat(m.chat.id)
except RPCError as e:
print(e)
else:
chat_id = m.text.split(maxsplit=1)[1]
try:
await c.leave_chat(int(chat_id))
except RPCError as e:
print(e)
@Client.on_message(filters.command(["bot_stats", "stats"], prefix) & sudofilter)
async def getbotstats(c: Client, m: Message):
users_count = await users.all().count()
groups_count = await groups.all().count()
filters_count = await dbfilters.all().count()
notes_count = await notes.all().count()
bot_uptime = round(time.time() - c.start_time)
bot_uptime = humanfriendly.format_timespan(bot_uptime)
await m.reply_text(
"<b>Bot statistics:</b>\n\n"
f"<b>Users:</b> {users_count}\n"
f"<b>Groups:</b> {groups_count}\n"
f"<b>Filters:</b> {filters_count}\n"
f"<b>Notes:</b> {notes_count}\n"
f"<b>Pyrogram Version:</b> {pyrogram_version} (Layer {pyrogram_layer})\n"
f"<b>Bot Version:</b> {alisu_version} ({c.version_code})\n"
f"<b>Uptime:</b> {bot_uptime}"
)
@Client.on_message(filters.command("chat", prefix) & sudofilter)
async def getchatcmd(c: Client, m: Message):
if len(m.text.split()) > 1:
targetchat = await c.get_chat(m.command[1])
if targetchat.type != "private":
await m.reply_text(
f"<b>Title:</b> {targetchat.title}\n<b>Username:</b> {targetchat.username}\n<b>Link:</b> {targetchat.invite_link}\n<b>Members:</b> {targetchat.members_count}"
)
else:
await m.reply_text("This is a private Chat.")
else:
await m.reply_text("You must specify the Chat.")
|
android.py
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This contains useful functions for talking to android devices and getting
information about .apks'''
import collections
import os
import subprocess
import time
from threading import Thread
from Queue import Queue, Empty
def adb(params, program_args):
'''Runs a single command through ADB.
Arguments:
params: A list of the parameters to pass to adb
program_args: The arguments to this program.
program_args must contain a .verbose member.
If program_args.verbose is true, then the command and the output is
printed,
otherwise no output is present.
'''
args = ['adb']
args.extend(params)
if program_args.verbose:
print args
subprocess.check_call(args)
else:
subprocess.check_call(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def adb_stream(params, program_args):
'''Runs a single command through ADB, returns the process with stdout
redirected.
Arguments:
params: A list of the parameters to pass to adb
program_args: The arguments to this program.
program_args must contain a .verbose member.
If program_args.verbose is true, then the command is printed.
'''
args = ['adb']
args.extend(params)
if program_args.verbose:
print args
return subprocess.Popen(args, stdout=subprocess.PIPE)
def install_apk(apk_info, program_args):
'''Installs an apk.
Overwrites existing APK if it exists and grants all permissions.
program_args must have a .verbose member.
'''
adb(['install', '-r', '-g', apk_info.apk_name], program_args)
adb(['shell', 'pm', 'grant', apk_info.package_name,
'android.permission.WRITE_EXTERNAL_STORAGE'], program_args)
def get_apk_info(apk):
"""Returns a named tuple (test_name, package_name, activity_name) for the
given apk."""
test_name = os.path.splitext(os.path.basename(apk))[0]
package_name = 'com.example.test.' + test_name
activity_name = 'android.app.NativeActivity'
apk_info = collections.namedtuple(
'ApkInfo', ['test_name', 'package_name', 'activity_name', 'apk_name'])
return apk_info(test_name, package_name, activity_name, apk)
def watch_process(silent, program_args, other_proc=None):
''' Watches the output of a running android process.
Arguments:
silent: True if output should be consumed, false otherwise
program_args: The arguments passed to the program.
program_args must contain a .verbose member.
if other_proc != None, then it will also be
waited for. If either process ends, the other is killed.
It is expected that the log was cleared before the process started.
Returns the return code that it produced
'''
proc = adb_stream(
['logcat', '-s', '-v', 'brief', 'VulkanTestApplication:V'],
program_args)
return_value = 0
q = Queue()
def read_lines(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
queue.put(None)
t = Thread(target=read_lines, args=(proc.stdout, q))
t.daemon = True
t.start()
while True:
if other_proc:
val = other_proc.poll()
if val != None:
proc.kill()
return val
try:
line = q.get_nowait()
except Empty:
time.sleep(0.1)
continue
if line != None:
if 'beginning of crash' in line:
print '**Application Crashed**'
proc.kill()
return -1
split_line = line.split(':')[1:]
if not split_line:
continue
line_text = ':'.join(split_line)[1:]
if split_line and split_line[0] == ' RETURN':
if program_args.verbose:
print line_text,
return_value = int(split_line[1])
break
if program_args.verbose or not silent:
print line_text,
proc.kill()
return return_value
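# A hypothetical end-to-end sketch tying the helpers above together (the apk name and the
# argparse.Namespace used as program_args are illustrative assumptions, not part of the
# original module):
#   import argparse
#   args = argparse.Namespace(verbose=True)
#   info = get_apk_info('sample_test.apk')
#   install_apk(info, args)
#   adb(['logcat', '-c'], args)  # clear the log, as watch_process expects
#   adb(['shell', 'am', 'start', '-n',
#        '%s/%s' % (info.package_name, info.activity_name)], args)
#   return_code = watch_process(False, args)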
|
pymmw.py
|
#!/bin/sh
'''which' python3 > /dev/null && exec python3 "$0" "$@" || exec python "$0" "$@"
'''
#
# Copyright (c) 2018, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# goto pymmw
#
import os
import sys
import glob
import serial
import threading
import json
import argparse
import signal
import platform
import time
from lib.shell import *
from lib.probe import *
from lib.carrier import *
# ------------------------------------------------
def _init_(data, fw):
    # Testprint
    #print("\nentering _init_\n")
    global mss
    # Testprint
    #print("\n!!!! -> in _init_, printing fw: " + str(fw))
    if len(data) > 0 and mss is None:
        # Testprint
        print("\nIn _init_, length of the data is: " + str(len(data)) + "!!!\n")
        for item in fw:
            mss = __import__(item, fromlist=('',))
            # Testprint
            print("!!!! -> In the for loop of _init_, printing mss after importing the firmware module! mss: " +
                  str(mss) + ", of type: " + str(type(mss)) + "!!!\n")
            if len(mss._read_(data, open(os.devnull, "w"))) > 1:
                # Testprint
                #print("\n!!! pymmw.py _init_ TRUE !!!\n")
                print(mss._read_(data, open(os.devnull, "w"))) # here mss prints the matching device (xWR64xx)
                return True
            mss = None
    # Testprint
    #print("\nIn _init_, returning: False\n")
    return False
def _read_(prt, dat, timeout=2, handle=None): # observe control port and call handler when firmware is recognized
"""
myDoc:
This function is called as a Thread in the _main_ of pymmw.py.
"""
# Testprint
#print("\n--- Bin in _read_ von pymmw!!!\n")
# script_path get the path of the directory of pymmw.py
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
# Testprint
#print("\nAus _read_ printe script_path:\n" + script_path)
# In fw the names of .py files in the \pymmw\source\mss directory are getting saved in these steps.
fw = [os.path.splitext(item)[0].split(os.sep) for item in glob.glob(os.sep.join((script_path, 'mss', '*.py')))]
fw = ['.'.join(mss[-2:]) for mss in fw]
# Testprint
#print("\nPrinte fw aus _read_:\nfw = " + str(fw))
cnt = 0
ext = {}
try:
if len(fw) == 0:
raise Exception('no handlers found')
t = time.time()
        # Testprint
        #print("\nprinting t (time.time) from thread _read_.\nt= " + str(time.asctime(time.localtime(t))) + "\n")
data = ''
reset = None
while handle is None:
            # The serial connection 'prt' is read here. readline() reads until the EOL (end-of-line)
            # character terminates the read. decode() returns a decoded string (type str).
            # (myDoc)
            data = prt.readline().decode('latin-1')
            # Testprint
            #print(data)
            if _init_(data, fw): # firmware identified
                # Testprint
                print("\n!!!! -> In the '_init_' branch of thread '_read_' !!!!\n")
                # Testprint
                print("\n!!!! -> DATAPRINT! Printing the data that is available now:\n" + str(data))
                handle = data.strip() # removes whitespace from the decoded string
                # Testprint
                print("!!!! -> handle has a new value: " + str(handle) + " <-----!!!!\n")
                break
            else:
                # (myDoc) The program apparently stays here until NRST is pressed,
                # because no data is flowing over the control port (COM4) yet
                print(data, end='', flush=True)
                # Testprint
                print("\n_init_ is False\n")
            # The program apparently never enters the following if block, because timeout is set to 'None'.
            # I think this is only used with the DCA1000 or the mmWaveBoost carrier board.
            # MMWAVEBOOST AND DCA1000 ONLY ----> (*FTDI_USB, check carrier.py)
if timeout is not None:
if time.time() - timeout > t:
car = usb_discover(*FTDI_USB)
if len(car) > 0 and not reset:
reset = time.time()
if not ftdi_reset(*FTDI_USB):
raise Exception('carrier not supported')
t = reset
continue
raise Exception('no handler found')
        # End of while
        # The while loop ends once NRST is pressed, data arrives over the control port (COM4),
        # mss holds the name of the firmware module, and handle is no longer None.
        # Testprint
        print("\nStatus of mss after handle is no longer None: " + str(mss) + "\n")
if mss is None:
if not _init_(handle, fw):
raise Exception('handler not supported')
reset = None
while True:
            buf = mss._read_(data) # the function returns the device (in my case xWR64xx); why not xWR68xx?
            # Testprint
            #print("\n!!! -> handle is no longer None and I am now in the lower while loop of _read_")
            #print("!!! -> pymmw.py _read_: content of data that mss is read with: " + str(data))
            #print("!!! -> now printing 'buf': " + str(buf) + " <- !!!\n")
            if len(buf) < 2:
                if reset: # reset detected
                    handler = os.path.splitext(mss.__file__.split(os.sep)[-1])[0]
                    # Testprint
                    print("\nIn the lower while loop of _read_, entering the 'if len(buf) < 2' branch" +
                          " and the handler is read out! handler: " + str(handler) + "\n")
                    print_log('handler:', handler, '-', 'configuration:', reset) # (i) handler: x8_mmw - configuration: xWR64xx
                    cnt += 1
                    file = open('{}/{}-{}.{}'.format('mss', handler, reset, 'cfg'), 'r') # builds mss/x8_mmw-xWR64xx.cfg here
                    # Testprint
                    print("File to be loaded: " + str(file))
                    content = load_config(file)
                    # Testprint
                    #print("The following content is about to be loaded:\n" + str(content))
                    cfg = json.loads(content) # loads the x8_mmw-xWR64xx.cfg file (JSON) here
                    # Test
                    #print("The following configuration is about to be loaded:\n" + str(cfg))
                    cfg, par = mss._conf_(cfg)
                    # takes the relevant values out of cfg, deletes some entries and writes the "new" cfg
                    # back into the cfg variable (overwriting it). The 'par' variable receives values that
                    # were taken from 'cfg' or computed from it ('loglin', 'fftcomp', 'rangebias').
                    mss._init_(prt, dev, cfg, dat) # here the thread that reads the data port is started (prt, dev, cfg are not actually used)
                    mss._proc_(cfg, par)
                    send_config(prt, cfg, mss._read_)
                    show_config(cfg)
                    reset = None # reset was xWR64xx up to here and is cleared so this branch is not entered again for now
                    # Testprint
                    print("\npymmw.py _read_ below -> RESET IS CLEARED!!!!")
            else:
                # Testprint
                print("\npymmw.py _read_ in the last ELSE: buf has a length greater than 2, so I am in the else; printing reset = buf = " + str(buf))
                reset = buf
            data = prt.readline().decode('latin-1')
            # Testprint
            print("\nPrinting data at the very bottom of pymmw.py _read_: " + str(data) + " | (end)")
except Exception as e:
print_log(e, sys._getframe())
os._exit(1)
def _input_(prt): # accept keyboard input and forward to control port
"""
Diese Funktion ist scheinbar dafür da, Eingaben über die Tastur zu empfangen und diese an der Sensor
zu senden. Um ein CLI Kommando für den Sensor zu sein, müssen die Eingaben mit einem % beginnen.
"""
# Testprint
print("\n--- Bin in _input_ von pymmw!!!\n")
while not sys.stdin.closed:
line = sys.stdin.readline()
if not line.startswith('%'):
prt.write(line.encode())
# ------------------------------------------------
if __name__ == "__main__":
    # Testprint
    print("\nEntering __name__ == __main__\n")
    #
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    nrst = 'Windows' not in platform.system()
    # Testprint
    print("\nPrinting variable nrst from __main__: " + str(nrst) + "\n")
try:
        # Testprint
        #print("\nEntering the try statement in __main__\n")
        # This is here so the program can be started from a shell.
        # The arguments used to define the ports are generated here.
parser = argparse.ArgumentParser(description='pymmw', epilog='', add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--control-port', help='serial port for control communication', required=not nrst or '-n' in sys.argv or '--no-discovery' in sys.argv)
parser.add_argument('-d', '--data-port', help='serial port auxiliary communication', required=not nrst or '-n' in sys.argv or '--no-discovery' in sys.argv)
parser.add_argument('-f', '--force-handler', help='force handler for data processing (disables autodetection)', required=False)
parser.add_argument('-n', '--no-discovery', help='no discovery for USB devices (avoids pre-access to the XDS debug probe)', action='store_true')
        # The parser arguments (presumably from the shell) are handed over to the args variable here
        args = parser.parse_args()
        # Testprint
        print("\nPrinting ARGS from the try block of __main__: " + str(args) + "\n")
        # ---
        dev, prts = None, (None, None)
        # Testprint
        print("\nPrinting DEV and PRTS from __main__: " + str(dev) + str(prts) + "\n")
        nrst = nrst and not args.no_discovery
        # Testprint
        print("\nPrinting args.no_discovery from __main__: " + str(args.no_discovery))
        print("\nPrinting variable nrst from __main__ again: " + str(nrst) + "\n")
        if nrst:
            # Testprint
            print("\nInside IF NRST\n")
try:
dev = usb_discover(*XDS_USB)
                # Testprint
                print("\nInside the try after IF NRST!\nPrinting variable dev:\n" + str(dev))
if len(dev) == 0: raise Exception('no device detected')
dev = dev[0]
print_log(' - '.join([dev._details_[k] for k in dev._details_]))
for rst in (False,):
try:
xds_test(dev, reset=rst)
break
except:
pass
prts = serial_discover(*XDS_USB, sid=dev._details_['serial'])
if len(prts) != 2: raise Exception('unknown device configuration detected')
except:
nrst = False
                # Testprint
                print("\nInside the except of IF NRST\n")
# ---
if args.control_port is None: args.control_port = prts[0]
if args.data_port is None: args.data_port = prts[1]
# ---
mss = None
        # A serial connection to the control port (COM4) is opened here.
        # It is then used in the _read_ thread to read out the data.
        con = serial.Serial(args.control_port, 115200, timeout=0.01)
        # Testprint
        print("\nPrinting con from __main__\n" + str(con) + "\n")
        if con is None: raise Exception('not able to connect to control port')
        print_log('control port: {} - data port: {}'.format(args.control_port, args.data_port))
        # Testprint
        print("\nTestprint to check whether printing still works after print_log!\nPrinting args.force_handler: " +
              str(args.force_handler))
        if args.force_handler:
            print_log('handler: {}'.format(args.force_handler))
        # The program apparently enters these two threads, which start the target functions,
        # and waits there until something happens
        tusr = threading.Thread(target=_read_, args=(con, args.data_port, None if not nrst else 2, args.force_handler))
        tusr.start()
        tstd = threading.Thread(target=_input_, args=(con,), )
        tstd.start()
        # Testprint
        print("\nReached this point after starting the threads, checking that this section is reached once the threads are running\n")
# ---
if nrst:
            # Testprint
            print("\nInside the IF statement for NRST == True\n")
xds_reset(dev)
usb_free(dev)
else:
print('\nwaiting for reset (NRST) of the device', file=sys.stderr, flush=True)
except Exception as e:
        # Testprint
        print("\nInside the except of __main__\n")
print_log(e, sys._getframe())
os._exit(1)
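# A hypothetical invocation sketch (the port names are illustrative; on non-Windows hosts the
# ports are normally discovered automatically via the XDS debug probe):
#   python pymmw.py -c COM4 -d COM5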
|
conftest.py
|
"""Fixtures and setup / teardown functions
Tasks:
1. setup test database before starting the tests
2. delete test database after running the tests
"""
import os
import copy
import random
from collections import namedtuple
from logging import getLogger
from logging.config import dictConfig
import pytest
from pymongo import MongoClient
from bigchaindb.common import crypto
from bigchaindb.log import setup_logging
from bigchaindb.tendermint_utils import key_from_base64
from bigchaindb.common.crypto import (key_pair_from_ed25519_key,
public_key_from_ed25519_key)
from bigchaindb.lib import Block
TEST_DB_NAME = 'bigchain_test'
USER2_SK, USER2_PK = crypto.generate_key_pair()
# Test user. inputs will be created for this user. Cryptography Keys
USER_PRIVATE_KEY = '8eJ8q9ZQpReWyQT5aFCiwtZ5wDZC4eDnCen88p3tQ6ie'
USER_PUBLIC_KEY = 'JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE'
def pytest_runtest_setup(item):
if isinstance(item, item.Function):
backend = item.session.config.getoption('--database-backend')
if (item.get_marker('localmongodb') and backend != 'localmongodb'):
pytest.skip('Skip tendermint specific tests if not using localmongodb')
def pytest_addoption(parser):
from bigchaindb.backend.connection import BACKENDS
backends = ', '.join(BACKENDS.keys())
parser.addoption(
'--database-backend',
action='store',
default=os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'localmongodb'),
help='Defines the backend to use (available: {})'.format(backends),
)
def pytest_ignore_collect(path, config):
from bigchaindb.backend.connection import BACKENDS
path = str(path)
supported_backends = BACKENDS.keys()
if os.path.isdir(path):
dirname = os.path.split(path)[1]
if dirname in supported_backends and dirname != config.getoption('--database-backend'):
print('Ignoring unrequested backend test dir: ', path)
return True
def pytest_configure(config):
config.addinivalue_line(
'markers',
'bdb(): Mark the test as needing BigchainDB.'
'BigchainDB will be configured such that the database and tables are available for an '
'entire test session.'
'You need to run a backend (e.g. MongoDB) '
'prior to running tests with this marker. You should not need to restart the backend '
'in between tests runs since the test infrastructure flushes the backend upon session end.'
)
config.addinivalue_line(
'markers',
'abci(): Mark the test as needing a running ABCI server in place. Use this marker'
'for tests that require a running Tendermint instance. Note that the test infrastructure'
'has no way to reset Tendermint data upon session end - you need to do it manually.'
'Setup performed by this marker includes the steps performed by the bdb marker.'
)
@pytest.fixture(autouse=True)
def _bdb_marker(request):
if request.keywords.get('bdb', None):
request.getfixturevalue('_bdb')
@pytest.fixture(autouse=True)
def _restore_config(_configure_bigchaindb):
from bigchaindb import config, config_utils
config_before_test = copy.deepcopy(config)
yield
config_utils.set_config(config_before_test)
@pytest.fixture(scope='session')
def _configure_bigchaindb(request):
import bigchaindb
from bigchaindb import config_utils
test_db_name = TEST_DB_NAME
# Put a suffix like _gw0, _gw1 etc on xdist processes
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
test_db_name = '{}_{}'.format(TEST_DB_NAME, xdist_suffix)
backend = request.config.getoption('--database-backend')
config = {
'database': bigchaindb._database_map[backend],
'tendermint': {
'host': 'localhost',
'port': 26657,
}
}
config['database']['name'] = test_db_name
config = config_utils.env_config(config)
config_utils.set_config(config)
    # NOTE: we use a custom log level for benchmark logging,
    # so logging needs to be set up explicitly here.
setup_logging()
@pytest.fixture(scope='session')
def _setup_database(_configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
print('Initializing test db')
dbname = config['database']['name']
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
schema.init_database(conn)
print('Finishing init database')
yield
print('Deleting `{}` database'.format(dbname))
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
print('Finished deleting `{}`'.format(dbname))
@pytest.fixture
def _bdb(_setup_database, _configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect
from .utils import flush_db
conn = connect()
yield
dbname = config['database']['name']
flush_db(conn, dbname)
# We need this fixture to avoid loading an existing
# config file from the home directory of the user running
# the tests. If it's too aggressive we can change it
# later.
@pytest.fixture
def ignore_local_config_file(monkeypatch):
def mock_file_config(filename=None):
return {}
monkeypatch.setattr('bigchaindb.config_utils.file_config',
mock_file_config)
@pytest.fixture
def reset_logging_config():
# root_logger_level = getLogger().level
root_logger_level = 'DEBUG'
dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
yield
getLogger().setLevel(root_logger_level)
@pytest.fixture
def user_sk():
return USER_PRIVATE_KEY
@pytest.fixture
def user_pk():
return USER_PUBLIC_KEY
@pytest.fixture
def user2_sk():
return USER2_SK
@pytest.fixture
def user2_pk():
return USER2_PK
@pytest.fixture
def alice():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def alice_privkey(alice):
return alice.private_key
@pytest.fixture
def alice_pubkey(alice):
return alice.public_key
@pytest.fixture
def bob():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def bob_privkey(bob):
return bob.private_key
@pytest.fixture
def bob_pubkey(bob):
    return bob.public_key
@pytest.fixture
def carol():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def carol_privkey(carol):
return carol.private_key
@pytest.fixture
def carol_pubkey(carol):
return carol.public_key
@pytest.fixture
def merlin():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def merlin_privkey(merlin):
return merlin.private_key
@pytest.fixture
def merlin_pubkey(merlin):
return merlin.public_key
@pytest.fixture
def b():
from bigchaindb import BigchainDB
return BigchainDB()
@pytest.fixture
def tb():
from bigchaindb import BigchainDB
return BigchainDB()
@pytest.fixture
def create_tx(alice, user_pk):
from bigchaindb.models import Transaction
name = f'I am created by the create_tx fixture. My random identifier is {random.random()}.'
return Transaction.create([alice.public_key], [([user_pk], 1)], asset={'name': name})
@pytest.fixture
def signed_create_tx(alice, create_tx):
return create_tx.sign([alice.private_key])
@pytest.mark.abci
@pytest.fixture
def posted_create_tx(b, signed_create_tx):
res = b.post_transaction(signed_create_tx, 'broadcast_tx_commit')
assert res.status_code == 200
return signed_create_tx
@pytest.fixture
def signed_transfer_tx(signed_create_tx, user_pk, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(inputs, [([user_pk], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
@pytest.fixture
def double_spend_tx(signed_create_tx, carol_pubkey, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(
inputs, [([carol_pubkey], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
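# Helper for the block-creating fixtures below: height of the latest stored
# block, or 0 if no block has been stored yet.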
def _get_height(b):
maybe_block = b.get_latest_block()
return 0 if maybe_block is None else maybe_block['height']
@pytest.fixture
def inputs(user_pk, b, alice):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
    for _ in range(4):
        transactions = [
            Transaction.create(
                [alice.public_key],
                [([user_pk], 1)],
                metadata={'msg': random.random()},
            ).sign([alice.private_key]).to_dict()
            for _ in range(10)
        ]
        block = Block(app_hash='', height=_get_height(b), transactions=transactions)
        b.store_block(block._asdict())
@pytest.fixture
def inputs_shared(user_pk, user2_pk, b, alice):
    from bigchaindb.models import Transaction
    # create blocks with transactions shared by `USER` and `USER2` to spend
    for _ in range(4):
        transactions = [
            Transaction.create(
                [alice.public_key],
                [([user_pk, user2_pk], 1)],
                metadata={'msg': random.random()},
            ).sign([alice.private_key]).to_dict()
            for _ in range(10)
        ]
        block = Block(app_hash='', height=_get_height(b), transactions=transactions)
        b.store_block(block._asdict())
@pytest.fixture
def dummy_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import (DatabaseDoesNotExist,
DatabaseAlreadyExists)
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.init_database(conn, dbname)
except DatabaseAlreadyExists:
schema.drop_database(conn, dbname)
schema.init_database(conn, dbname)
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def not_yet_created_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def db_config():
from bigchaindb import config
return config['database']
@pytest.fixture
def db_host(db_config):
return db_config['host']
@pytest.fixture
def db_port(db_config):
return db_config['port']
@pytest.fixture
def db_name(db_config):
return db_config['name']
@pytest.fixture
def db_conn():
from bigchaindb.backend import connect
return connect()
@pytest.fixture
def db_context(db_config, db_host, db_port, db_name, db_conn):
DBContext = namedtuple(
'DBContext', ('config', 'host', 'port', 'name', 'conn'))
return DBContext(
config=db_config,
host=db_host,
port=db_port,
name=db_name,
conn=db_conn,
)
@pytest.fixture
def tendermint_host():
return os.getenv('BIGCHAINDB_TENDERMINT_HOST', 'localhost')
@pytest.fixture
def tendermint_port():
return int(os.getenv('BIGCHAINDB_TENDERMINT_PORT', 26657))
@pytest.fixture
def tendermint_ws_url(tendermint_host, tendermint_port):
return 'ws://{}:{}/websocket'.format(tendermint_host, tendermint_port)
@pytest.fixture
def tendermint_context(tendermint_host, tendermint_port, tendermint_ws_url):
TendermintContext = namedtuple(
'TendermintContext', ('host', 'port', 'ws_url'))
return TendermintContext(
host=tendermint_host,
port=tendermint_port,
ws_url=tendermint_ws_url,
)
@pytest.fixture
def mocked_setup_pub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_pub_logger', autospec=True, spec_set=True)
@pytest.fixture
def mocked_setup_sub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_sub_logger', autospec=True, spec_set=True)
@pytest.fixture(autouse=True)
def _abci_http(request):
if request.keywords.get('abci', None):
request.getfixturevalue('abci_http')
@pytest.fixture
def abci_http(_setup_database, _configure_bigchaindb, abci_server,
tendermint_host, tendermint_port):
import requests
import time
    for _ in range(300):
        try:
            uri = 'http://{}:{}/abci_info'.format(tendermint_host, tendermint_port)
            requests.get(uri)
            return True
        except requests.exceptions.RequestException:
            pass
time.sleep(1)
return False
@pytest.yield_fixture(scope='session')
def event_loop(request):
import asyncio
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
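# Starts the BigchainDB ABCI application in a background process for the whole
# session; tests marked `abci` rely on it, and `abci_http` additionally waits
# for the Tendermint RPC endpoint to respond before letting them run.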
@pytest.mark.bdb
@pytest.fixture(scope='session')
def abci_server():
from abci import ABCIServer
from bigchaindb.core import App
from bigchaindb.utils import Process
app = ABCIServer(app=App())
abci_proxy = Process(name='ABCI', target=app.run)
yield abci_proxy.start()
abci_proxy.terminate()
@pytest.fixture
def wsserver_config():
from bigchaindb import config
return config['wsserver']
@pytest.fixture
def wsserver_scheme(wsserver_config):
return wsserver_config['advertised_scheme']
@pytest.fixture
def wsserver_host(wsserver_config):
return wsserver_config['advertised_host']
@pytest.fixture
def wsserver_port(wsserver_config):
return wsserver_config['advertised_port']
@pytest.fixture
def wsserver_base_url(wsserver_scheme, wsserver_host, wsserver_port):
return '{}://{}:{}'.format(wsserver_scheme, wsserver_host, wsserver_port)
@pytest.fixture
def unspent_output_0():
return {
'amount': 1,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 0,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d'
}
@pytest.fixture
def unspent_output_1():
return {
'amount': 2,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 1,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_output_2():
return {
'amount': 3,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 2,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_outputs(unspent_output_0, unspent_output_1, unspent_output_2):
return unspent_output_0, unspent_output_1, unspent_output_2
@pytest.fixture
def mongo_client(db_context):
return MongoClient(host=db_context.host, port=db_context.port)
@pytest.fixture
def utxo_collection(db_context, mongo_client):
return mongo_client[db_context.name].utxos
@pytest.fixture
def dummy_unspent_outputs():
return [
{'transaction_id': 'a', 'output_index': 0},
{'transaction_id': 'a', 'output_index': 1},
{'transaction_id': 'b', 'output_index': 0},
]
@pytest.fixture
def utxoset(dummy_unspent_outputs, utxo_collection):
res = utxo_collection.insert_many(copy.deepcopy(dummy_unspent_outputs))
assert res.acknowledged
assert len(res.inserted_ids) == 3
return dummy_unspent_outputs, utxo_collection
@pytest.fixture
def network_validators(node_keys):
validator_pub_power = {}
voting_power = [8, 10, 7, 9]
for pub, priv in node_keys.items():
validator_pub_power[pub] = voting_power.pop()
return validator_pub_power
@pytest.fixture
def network_validators58(network_validators):
network_validators_base58 = {}
for p, v in network_validators.items():
p = public_key_from_ed25519_key(key_from_base64(p))
network_validators_base58[p] = v
return network_validators_base58
@pytest.fixture
def node_key(node_keys):
(pub, priv) = list(node_keys.items())[0]
return key_pair_from_ed25519_key(key_from_base64(priv))
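# Hard-coded Tendermint validator key pairs (base64 public key -> private key)
# for a four-node test network; `network_validators` assigns a voting power to
# each of them.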
@pytest.fixture(scope='session')
def node_keys():
return {'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=':
'cM5oW4J0zmUSZ/+QRoRlincvgCwR0pEjFoY//ZnnjD3Mv8Nqy8q6VdnOFI0XDHhwtFcqRIz0Y8rtjSdngUTKUw==',
'GIijU7GBcVyiVUcB0GwWZbxCxdk2xV6pxdvL24s/AqM=':
'mdz7IjP6mGXs6+ebgGJkn7kTXByUeeGhV+9aVthLuEAYiKNTsYFxXKJVRwHQbBZlvELF2TbFXqnF28vbiz8Cow==',
'JbfwrLvCVIwOPm8tj8936ki7IYbmGHjPiKb6nAZegRA=':
'83VINXdj2ynOHuhvSZz5tGuOE5oYzIi0mEximkX1KYMlt/Csu8JUjA4+by2Pz3fqSLshhuYYeM+IpvqcBl6BEA==',
'PecJ58SaNRsWJZodDmqjpCWqG6btdwXFHLyE40RYlYM=':
'uz8bYgoL4rHErWT1gjjrnA+W7bgD/uDQWSRKDmC8otc95wnnxJo1GxYlmh0OaqOkJaobpu13BcUcvITjRFiVgw=='}
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
from bdb import Breakpoint
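# bdb.Breakpoint keeps its bookkeeping in class-level state; resetting it lets
# each doctest that sets breakpoints start numbering again from 1.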
def reset_Breakpoint():
Breakpoint.next = 1
Breakpoint.bplist = {}
Breakpoint.bpbynumber = [None]
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
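    # Helper: run `python -m pdb <pdb_args>` in a subprocess, feed `commands`
    # to its stdin, and return the decoded (stdout, stderr); note that stderr
    # is redirected into stdout by the Popen call below.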
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
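    # Helper: write `file_content` to TESTFN and check that
    # pdb.find_function(func_name, TESTFN) returns the expected
    # (funcname, filename, lineno) triple, or None when not found.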
def _assert_find_function(self, file_content, func_name, expected):
with open(support.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@support.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with support.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
worker.py
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.gcs_pubsub import gcs_pubsub_enabled, GcsPublisher, \
GcsSubscriber
from ray._private.runtime_env.py_modules import upload_py_modules_if_needed
from ray._private.runtime_env.working_dir import upload_working_dir_if_needed
from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import (_internal_kv_initialized,
_initialize_internal_kv,
_internal_kv_reset, _internal_kv_get)
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
        # When the worker is constructed, record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job has finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
# If True, make the debugger external to the node this worker is
# running on.
self.ray_debugger_external = False
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def worker_id(self):
return self.core_worker.get_worker_id().binary()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return self.core_worker.get_current_runtime_env()
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
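    # Illustrative sketch (editor note, not part of the original source): the
    # three modes correspond to how a program enters Ray. A driver that wants
    # LOCAL_MODE for serial debugging normally requests it through the public
    # API instead of calling set_mode() directly:
    #
    #     import ray
    #     ray.init(local_mode=True)   # connect() ends up calling set_mode(LOCAL_MODE)
    #     ray.shutdown()
    #
    # Calling set_mode() by hand is only expected in Ray's own internals and tests.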
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
owner_address: The serialized address of object's owner.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value,
object_ref=object_ref,
owner_address=owner_address),
# If the owner address is set, then the initial reference is
# already acquired internally in CoreWorker::CreateOwned.
# TODO(ekl) we should unify the code path more with the others
# to avoid this special case.
skip_adding_local_ref=(owner_address is not None))
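    # Illustrative sketch (editor note, not part of the original source):
    # put_object is normally reached through the public ray.put API, e.g.:
    #
    #     import ray
    #     ray.init()
    #     ref = ray.put({"answer": 42})   # routed to Worker.put_object
    #     assert ray.get(ref) == {"answer": 42}
    #
    # Passing a ray.ObjectRef to ray.put raises the TypeError above.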
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in seconds to wait
                before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
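    # Illustrative sketch (editor note, not part of the original source):
    # get_objects backs the public ray.get call, including its timeout:
    #
    #     import ray
    #     from ray.exceptions import GetTimeoutError
    #
    #     ray.init()
    #
    #     @ray.remote
    #     def slow():
    #         import time
    #         time.sleep(10)
    #         return 1
    #
    #     try:
    #         ray.get(slow.remote(), timeout=1)   # becomes timeout_ms=1000 above
    #     except GetTimeoutError:
    #         pass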
def run_function_on_all_workers(self, function):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.gcs_client.internal_kv_put(
b"Lock:" + key, b"1", False,
ray_constants.KV_NAMESPACE_FUNCTION_TABLE) == 0
if function_exported is True:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_function(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.gcs_client.internal_kv_put(
key,
pickle.dumps({
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
}), True, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
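    # Illustrative sketch (editor note, not part of the original source): a
    # driver can use this to run setup code on every current and future
    # worker, e.g. to extend sys.path (the same pattern connect() uses below):
    #
    #     def _add_path(worker_info):
    #         import sys
    #         sys.path.insert(1, "/tmp/my_shared_code")
    #
    #     ray.worker.global_worker.run_function_on_all_workers(_add_path)
    #
    # The callable receives a worker info dict; its return value is ignored.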
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
subscriber = self.redis_client.pubsub(ignore_subscribe_messages=True)
subscriber.subscribe(gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_hex = self.current_job_id.hex()
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = subscriber.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_hex != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
subscriber.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
if worker.mode != WORKER_MODE:
if log_once("worker_get_gpu_ids_empty_from_driver"):
logger.warning(
"`ray.get_gpu_ids()` will always return the empty list when "
"called from the driver. This is because Ray does not manage "
"GPU allocations to the driver process.")
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
# Note: We should only get the GPU ids from the placement
# group resource that does not contain the bundle index!
import re
if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
resource):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
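# Illustrative sketch (editor note): the _example_* helper below is an
# editor-added illustration, not a Ray API. Inside a GPU task or actor,
# ray.get_gpu_ids() reports only the GPUs assigned to that task, while on the
# driver it returns [] as warned above.
def _example_get_gpu_ids():  # pragma: no cover - illustrative only
    import ray

    ray.init(num_gpus=2)

    @ray.remote(num_gpus=1)
    def which_gpus():
        return ray.get_gpu_ids()

    assigned = ray.get(which_gpus.remote())  # e.g. [0] or [1]
    ray.shutdown()
    return assigned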
@Deprecated
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = ray_constants.LOGGER_LEVEL,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows.
.. code-block:: python
ray.init(address="auto")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities of those resources available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
        dashboard_port (int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
namespace (str): Namespace to use
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
        _metrics_export_port (int): Port number through which Ray exposes
            system metrics via a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address, _deprecation_warn_enabled=False)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
# User passed in extra keyword arguments but isn't connecting through
# ray client. Raise an error, since most likely a typo in keyword
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if RAY_JOB_CONFIG_JSON_ENV_VAR in os.environ:
if runtime_env:
logger.warning(
"Both RAY_JOB_CONFIG_JSON_ENV_VAR and ray.init(runtime_env) "
"are provided, only using JSON_ENV_VAR to construct "
"job_config. Please ensure no runtime_env is used in driver "
"script's ray.init() when using job submission API.")
# Set runtime_env in job_config if passed as env variable, such as
# ray job submission with driver script executed in subprocess
job_config_json = json.loads(
os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR))
job_config = ray.job_config.JobConfig.from_json(job_config_json)
# RAY_JOB_CONFIG_JSON_ENV_VAR is only set at ray job manager level and has
# higher priority in case user also provided runtime_env for ray.init()
elif runtime_env:
# Set runtime_env in job_config if passed in as part of ray.init()
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
            # We need to disable this if the runtime env is not set. Uploading
            # happens after the core worker is created, and we should prevent
            # default workers from being created before uploading.
            # TODO (yic): Have a separate connection to the GCS client once
            # Redis removal is done. The uploading should happen before this
            # one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` is still True if we
        # # didn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
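# Illustrative sketch (editor note): the _example_* helper below is an
# editor-added illustration, not a Ray API. It shows the lifecycle that
# init()/shutdown() implement: start a local cluster, inspect the returned
# address info, then tear everything down (e.g. between tests).
def _example_init_lifecycle():  # pragma: no cover - illustrative only
    import ray

    info = ray.init(num_cpus=2, ignore_reinit_error=True)
    # For a locally started cluster, init() returns the node's address info
    # plus the hex node id (see the `return dict(...)` at the end of init()).
    print(info.get("node_id"), info.get("redis_address"))
    ray.shutdown()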
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# disconnect internal kv
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
_internal_kv_reset()
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "core_worker"):
global_worker.core_worker.shutdown()
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"cluster status. To disable these "
"messages, set RAY_SCHEDULER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
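# Illustrative sketch (editor note): the _example_* helper below is an
# editor-added illustration, not a Ray API. When autoscaler events are
# enabled, only lines carrying the ":event_summary:" token survive, stripped
# down to the event text (plus a one-time tip about `ray status`).
def _example_filter_autoscaler_events():  # pragma: no cover - illustrative only
    lines = [
        "unrelated raylet output",
        "2021-12-01 10:00:00,000 INFO :event_summary:Resized to 5 CPUs.",
    ]
    # Expected with AUTOSCALER_EVENTS on: [<tip about `ray status`>,
    # "Resized to 5 CPUs."]; with it off, an empty list.
    return list(filter_autoscaler_events(lines))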
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
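# Worked example (editor note, not part of the original source): 930 seconds
# after t0, delta = 930 leaves 0 hours, 15 minutes and 30 seconds, so
# time_string() returns "15m30s"; at 3665 seconds it returns "1h1m5s".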
# When we enter a breakpoint, worker logs are automatically disabled via this.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
if not _worker_logs_enabled:
return
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data.get("pid") in ["autoscaler", "raylet"]:
return ""
else:
res = "pid="
if data.get("actor_name"):
res = data["actor_name"] + " " + res
elif data.get("task_name"):
res = data["task_name"] + " " + res
return res
def color_for(data: Dict[str, str], line: str) -> str:
"""The color for this log line."""
if data.get("pid") == "raylet":
return colorama.Fore.YELLOW
elif data.get("pid") == "autoscaler":
if "Error:" in line or "Warning:" in line:
return colorama.Style.BRIGHT + colorama.Fore.YELLOW
else:
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data.get("pid") == "autoscaler":
pid = "scheduler +{}".format(time_string())
lines = filter_autoscaler_events(data.get("lines", []))
else:
pid = data.get("pid")
lines = data.get("lines", [])
if data.get("ip") == data.get("localhost"):
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM,
color_for(data,
line), prefix_for(data),
pid, colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(colorama.Style.DIM,
color_for(data, line),
prefix_for(data), pid,
data.get("ip"),
colorama.Style.RESET_ALL,
line),
file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
def listen_error_messages_from_gcs(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to be published.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
# Exports that are published after the call to
# gcs_subscriber.subscribe_error() and before the call to
# gcs_subscriber.poll_error() will still be processed in the loop.
# TODO: we should just subscribe to the errors for this specific job.
worker.gcs_subscriber.subscribe_error()
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if received a signal that the thread should stop.
if threads_stopped.is_set():
return
_, error_data = worker.gcs_subscriber.poll_error()
if error_data is None:
continue
if error_data.job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if error_data.type == ray_constants.TASK_PUSH_ERROR:
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, ConnectionError) as e:
logger.error(f"listen_error_messages_from_gcs: {e}")
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
worker_shim_pid=0,
startup_token=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
startup_token (int): The startup token of the process assigned to
it during startup as a command line argument.
        ray_debugger_external (bool): If True, make the Ray debugger
            available externally to the node this worker is running on.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
worker.gcs_channel = gcs_utils.GcsChannel(redis_client=worker.redis_client)
worker.gcs_client = gcs_utils.GcsClient(worker.gcs_channel)
_initialize_internal_kv(worker.gcs_client)
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
worker.gcs_pubsub_enabled = gcs_pubsub_enabled()
worker.gcs_publisher = None
if worker.gcs_pubsub_enabled:
worker.gcs_publisher = GcsPublisher(
channel=worker.gcs_channel.channel())
worker.gcs_subscriber = GcsSubscriber(
channel=worker.gcs_channel.channel())
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.publish_error_to_driver(
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None,
redis_client=worker.redis_client,
gcs_publisher=worker.gcs_publisher)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
else:
# Add hook to suppress worker logs during breakpoint.
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
worker.ray_debugger_external = ray_debugger_external
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environment will be prepared
# at the server side.
if (mode == SCRIPT_MODE and not job_config.client_job
and job_config.runtime_env):
scratch_dir: str = worker.node.get_runtime_env_dir_path()
runtime_env = job_config.runtime_env or {}
runtime_env = upload_py_modules_if_needed(
runtime_env, scratch_dir, logger=logger)
runtime_env = upload_working_dir_if_needed(
runtime_env, scratch_dir, logger=logger)
# Remove excludes, it isn't relevant after the upload step.
runtime_env.pop("excludes", None)
job_config.set_runtime_env(runtime_env)
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid, startup_token)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_from_gcs
if worker.gcs_pubsub_enabled else listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and not job_config.runtime_env_has_uris():
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
tracing_hook_val = worker.gcs_client.internal_kv_get(
b"tracing_startup_hook", ray_constants.KV_NAMESPACE_TRACING)
if tracing_hook_val is not None:
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
tracing_hook_val.decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "gcs_subscriber"):
worker.gcs_subscriber.close()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
        key (str): The key name for the message. Multiple messages under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
*,
timeout: Optional[float] = None) -> Union[Any, List[Any]]:
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
Ordering for an input list of object refs is preserved for each object
returned. That is, if an object ref to A precedes an object ref to B in the
input list, then A will precede B in the returned list.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
host=None,
port=None,
patch_stdstreams=False,
quiet=None,
breakpoint_uuid=debugger_breakpoint.decode()
if debugger_breakpoint else None,
debugger_external=worker.ray_debugger_external)
rdb.set_trace(frame=frame)
return values
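# Editorial usage sketch (not part of the original module): how ray.get is
# typically paired with task submission. Assumes ray.init() has already been
# called by the application; the `square` task below is a made-up example.
def _example_get_usage():
    @ray.remote
    def square(x):
        return x * x
    single_ref = square.remote(3)
    assert ray.get(single_ref) == 9
    many_refs = [square.remote(i) for i in range(4)]
    assert ray.get(many_refs, timeout=10) == [0, 1, 4, 9]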
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
_owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
_owner: The actor that should own this object. This allows creating
objects with lifetimes decoupled from that of the creating process.
Note that the owner actor must be passed a reference to the object
prior to the object creator exiting, otherwise the reference will
still be lost.
Returns:
The object ref assigned to this value.
"""
worker = global_worker
worker.check_connected()
if _owner is None:
serialize_owner_address = None
elif isinstance(_owner, ray.actor.ActorHandle):
# Ensure `ray.state.state.global_state_accessor` is not None
ray.state.state._check_connected()
owner_address = gcs_utils.ActorTableData.FromString(
ray.state.state.global_state_accessor.get_actor_info(
_owner._actor_id)).address
if len(owner_address.worker_id) == 0:
raise RuntimeError(
f"{_owner} is not alive, it's worker_id is empty!")
serialize_owner_address = owner_address.SerializeToString()
else:
raise TypeError(
f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(
value, owner_address=serialize_owner_address)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
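# Editorial usage sketch (not part of the original module): ray.put stores a
# value in the object store once so that many tasks can read it by reference.
# Assumes ray.init() has already been called; `consume` is a made-up example.
def _example_put_usage():
    big_list = list(range(10000))
    ref = ray.put(big_list)
    @ray.remote
    def consume(data):
        return len(data)
    # The same object ref can be passed to many tasks without re-serializing.
    assert ray.get(consume.remote(ref)) == 10000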
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
*,
num_returns: int = 1,
timeout: Optional[float] = None,
fetch_local: bool = True
) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
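# Editorial usage sketch (not part of the original module): a typical
# ray.wait polling loop that processes results as soon as they become ready.
# Assumes ray.init() has already been called; `work` is a made-up example.
def _example_wait_usage():
    @ray.remote
    def work(i):
        return i * i
    pending = [work.remote(i) for i in range(4)]
    results = []
    while pending:
        ready, pending = ray.wait(pending, num_returns=1)
        results.extend(ray.get(ready))
    return results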
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
Args:
name: The name of the actor.
namespace: The namespace of the actor, or None to specify the current
namespace.
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
worker = global_worker
worker.check_connected()
return worker.core_worker.get_named_actor_handle(name, namespace or "")
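# Editorial usage sketch (not part of the original module): creating a named
# actor and retrieving it later via ray.get_actor. The Counter actor and the
# "global_counter" name are made-up examples.
def _example_get_actor_usage():
    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0
        def increment(self):
            self.value += 1
            return self.value
    Counter.options(name="global_counter").remote()
    handle = ray.get_actor("global_counter")
    return ray.get(handle.increment.remote())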
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
*,
force: bool = False,
recursive: bool = True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
        TypeError: Raised if the argument is not an ObjectRef; this is also
            raised for actor tasks.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
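# Editorial usage sketch (not part of the original module): cancelling an
# in-flight task and observing the outcome through ray.get. The `spin` task is
# a made-up example; depending on timing the error surfaced by ray.get may be
# TaskCancelledError or, with force=True, WorkerCrashedError.
def _example_cancel_usage():
    @ray.remote
    def spin():
        while True:
            pass
    ref = spin.remote()
    ray.cancel(ref)  # force=False: raises KeyboardInterrupt inside the task
    try:
        ray.get(ref)
    except ray.exceptions.TaskCancelledError:
        return "cancelled"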
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
runtime_env=None,
placement_group="default",
worker=None,
retry_exceptions=None,
concurrency_groups=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries, retry_exceptions,
runtime_env, placement_group)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_retries is not None:
raise TypeError("The keyword 'max_retries' is not "
"allowed for actors.")
if retry_exceptions is not None:
raise TypeError("The keyword 'retry_exceptions' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(
function_or_class, num_cpus, num_gpus, memory,
object_store_memory, resources, accelerator_type, max_restarts,
max_task_retries, runtime_env, concurrency_groups)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
@PublicAPI
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
            crashes unexpectedly. The minimum valid value is 0, the
            default is 4, and a value of -1 indicates infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation. This API is
in beta and may change before becoming stable.
retry_exceptions (bool): Only for *remote functions*. This specifies
whether application-level errors should be retried
up to max_retries times.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
valid_kwargs = [
"num_returns",
"num_cpus",
"num_gpus",
"memory",
"object_store_memory",
"resources",
"accelerator_type",
"max_calls",
"max_restarts",
"max_task_retries",
"max_retries",
"runtime_env",
"retry_exceptions",
"placement_group",
"concurrency_groups",
]
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
f"the arguments in the list {valid_kwargs}, for example "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in valid_kwargs, error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
runtime_env = kwargs.get("runtime_env")
placement_group = kwargs.get("placement_group", "default")
retry_exceptions = kwargs.get("retry_exceptions")
concurrency_groups = kwargs.get("concurrency_groups")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
runtime_env=runtime_env,
placement_group=placement_group,
worker=worker,
retry_exceptions=retry_exceptions,
concurrency_groups=concurrency_groups or [])
|
logging.py
|
"""Cyberjunky's 3Commas bot helpers."""
import json
import logging
import os
import queue
import threading
import time
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import apprise
class NotificationHandler:
"""Notification class."""
def __init__(self, program, enabled=False, notify_urls=None):
self.program = program
self.message = ""
if enabled and notify_urls:
self.apobj = apprise.Apprise()
urls = json.loads(notify_urls)
for url in urls:
self.apobj.add(url)
self.queue = queue.Queue()
self.start_worker()
self.enabled = True
else:
self.enabled = False
def start_worker(self):
"""Start notification worker."""
threading.Thread(target=self.process_queue, daemon=True).start()
def process_queue(self):
"""Process the queue."""
while True:
message, attachments = self.queue.get()
if attachments:
self.apobj.notify(body=message, attach=attachments)
else:
self.apobj.notify(body=message)
self.queue.task_done()
def queue_notification(self, message):
"""Queue notification messages."""
if self.enabled:
self.message += f"{message}\n\n"
def send_notification(self):
"""Send the notification messages."""
if self.enabled:
msg = f"[3C Cyber Bot-Helper {self.program}]\n\n" + self.message
self.queue.put((msg, []))
self.message = ""
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
"""Override original code to fix bug with not deleting old logfiles."""
def __init__(self, filename="", when="midnight", interval=1, backupCount=7, encoding='utf-8'):
super().__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount),
encoding=encoding
)
def getFilesToDelete(self):
"""Find all logfiles present."""
dirname, basename = os.path.split(self.baseFilename)
filenames = os.listdir(dirname)
result = []
prefix = basename + "."
plen = len(prefix)
for filename in filenames:
if filename[:plen] == prefix:
suffix = filename[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirname, filename))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[: len(result) - self.backupCount]
return result
def doRollover(self):
"""Delete old logfiles but keep latest backupCount amount."""
super().doRollover()
self.close()
timetuple = time.localtime(time.time())
dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for oldlog in self.getFilesToDelete():
os.remove(oldlog)
self.stream = open(self.baseFilename, "w")
currenttime = int(time.time())
newrolloverat = self.computeRollover(currenttime)
while newrolloverat <= currenttime:
newrolloverat = newrolloverat + self.interval
self.rolloverAt = newrolloverat
class Logger:
"""Logger class."""
my_logger = None
def __init__(
self,
datadir,
program,
notificationhandler,
logstokeep,
debug_enabled,
notify_enabled,
):
"""Logger init."""
self.my_logger = logging.getLogger()
self.datadir = datadir
self.program = program
self.notify_enabled = notify_enabled
self.notificationhandler = notificationhandler
if debug_enabled:
self.my_logger.setLevel(logging.DEBUG)
self.my_logger.propagate = False
else:
self.my_logger.setLevel(logging.INFO)
self.my_logger.propagate = False
date_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(
f"%(asctime)s - {program} - %(levelname)s - %(message)s", date_fmt
)
console_formatter = logging.Formatter(
f"%(asctime)s - {program} - %(message)s", date_fmt
)
# Create directory if not exists
if not os.path.exists(f"{self.datadir}/logs"):
os.makedirs(f"{self.datadir}/logs")
# Log to file and rotate if needed
file_handle = TimedRotatingFileHandler(
filename=f"{self.datadir}/logs/{self.program}.log", backupCount=logstokeep
)
file_handle.setFormatter(formatter)
self.my_logger.addHandler(file_handle)
# Log to console
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
console_handle.setFormatter(console_formatter)
self.my_logger.addHandler(console_handle)
self.info(f"3C Cyber Bot-Helper {program}")
self.info("Started on %s" % time.strftime("%A %H:%M:%S %Y-%m-%d"))
if self.notify_enabled:
self.info("Notifications are enabled")
else:
self.info("Notifications are disabled")
def log(self, message, level="info"):
"""Call the log levels."""
if level == "info":
self.my_logger.info(message)
elif level == "warning":
self.my_logger.warning(message)
elif level == "error":
self.my_logger.error(message)
elif level == "debug":
self.my_logger.debug(message)
def info(self, message, notify=False):
"""Info level."""
self.log(message, "info")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def warning(self, message, notify=True):
"""Warning level."""
self.log(message, "warning")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def error(self, message, notify=True):
"""Error level."""
self.log(message, "error")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def debug(self, message, notify=False):
"""Debug level."""
self.log(message, "debug")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
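# Editorial usage sketch (not part of the original module): typical wiring of
# NotificationHandler and Logger. The data directory and the Apprise URL below
# are made-up placeholder values.
def _example_logger_setup():
    notification = NotificationHandler(
        program="example-bot",
        enabled=True,
        notify_urls='["tgram://bottoken/ChatID"]',  # JSON-encoded list of Apprise URLs
    )
    logger = Logger(
        datadir="/tmp/example-bot",  # placeholder data directory
        program="example-bot",
        notificationhandler=notification,
        logstokeep=7,
        debug_enabled=False,
        notify_enabled=True,
    )
    logger.info("Bot helper started", notify=True)
    notification.send_notification()
    return logger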
|
mergebot.py
|
#!/usr/bin/python
"""Mergebot is a program which merges approved SCM changes into a master repo.
"""
import glob
from multiprocessing import Pipe, Process
from time import sleep
import signal
import yaml
from mergebot_backend import mergebot_poller
from mergebot_backend.log_helper import get_logger
from mergebot_frontend import db
from mergebot_frontend.models import Poller, QueuedItem
l = get_logger('mergebot')
def shutdown_mergebot(signum, _):
"""shutdown_mergebot is the handler used to receive the kill signal."""
    print('Caught {signal}.'.format(signal=signum))
raise ServerExit
class ServerExit(Exception):
"""ServerExit is a more specific exception for when we terminate."""
pass
def main():
"""Reads configs and kicks off pollers.
main reads the configs and then kicks off pollers per config file
successfully read in. It then waits for the signal to shut down itself
and its children.
"""
l.info('Mergebot manager starting up.')
configs = parse_configs()
l.info('Cleaning up old tables.')
Poller.query.delete()
QueuedItem.query.delete()
db.session.commit()
    # Ignore signals while we set up the pollers, then install the real handlers.
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
pollers = start_pollers(configs)
signal.signal(signal.SIGINT, shutdown_mergebot)
signal.signal(signal.SIGTERM, shutdown_mergebot)
try:
while True:
sleep(1)
except ServerExit:
l.info('Caught terminate signal; killing children and exiting.')
for poller in pollers:
poller.pipe.send('terminate')
for poller in pollers:
poller.process.join()
l.info('Children killed, done.')
class MergeBotConfig(object):
"""Defines MergeBot configuration for a project."""
def __init__(self,
name,
proj_name,
github_org,
repository,
merge_branch,
verification_branch,
scm_type,
jenkins_location,
prepare_command,
verification_job_name):
self.name = name
self.proj_name = proj_name
self.github_org = github_org
self.repository = repository
self.merge_branch = merge_branch
self.verification_branch = verification_branch
self.scm_type = scm_type
self.jenkins_location = jenkins_location
self.prepare_command = prepare_command
self.verification_job_name = verification_job_name
def mergebotconfig_constructor(loader, node):
"""mergebotconfig_constructor is an object constructor for the yaml library.
It is intended to be used to validate that all configs contain the
appropriate fields.
"""
values = loader.construct_mapping(node)
try:
name = values['name']
proj_name = values['proj_name']
github_org = values['github_org']
repository = values['repository']
merge_branch = values['merge_branch']
verification_branch = values['verification_branch']
scm_type = values['scm_type']
jenkins_location = values['jenkins_location']
prepare_command = values['prepare_command']
verification_job_name = values['verification_job_name']
except KeyError as exc:
raise yaml.YAMLError('problem with key {exc}'.format(exc=exc))
return MergeBotConfig(name,
proj_name,
github_org,
repository,
merge_branch,
verification_branch,
scm_type,
jenkins_location,
prepare_command,
verification_job_name)
def parse_configs():
"""Parses config files out of config/ directory.
Returns:
Array of MergeBotConfig objects.
"""
configs = []
yaml.add_constructor(u'!MergeBotConfig', mergebotconfig_constructor)
l.info('Parsing Config Files')
for filename in glob.iglob('config/*.yaml'):
with open(filename) as cfg:
try:
l.info('Opening {}'.format(filename))
config = yaml.load(cfg)
l.info('{} Successfully Read'.format(filename))
configs.append(config)
except yaml.YAMLError as exc:
l.fatal(
'Error parsing file {filename}: {exc}. Please fix and try '
'again.'.format(filename=filename, exc=exc))
return configs
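# Editorial sketch (not part of the original module): the shape of a
# config/*.yaml file that parse_configs accepts. Every value below is a
# made-up placeholder; only the keys are required by mergebotconfig_constructor.
#
#   !MergeBotConfig
#   name: example
#   proj_name: Example Project
#   github_org: example-org
#   repository: example-repo
#   merge_branch: master
#   verification_branch: verify
#   scm_type: github
#   jenkins_location: https://jenkins.example.com
#   prepare_command: ./prepare.sh
#   verification_job_name: example_verification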
class PollerInfo(object):
"""MergerInfo contains important hooks for the pollers we are running."""
def __init__(self, process, pipe):
self.process = process
self.pipe = pipe
def poll_scm(config, pipe):
"""poll_scm handles delegating a single repository's work to an SCM poller.
Args:
config: A dictionary of configuration to use for the poller.
pipe: Communication pipe for passing messages.
"""
try:
poller = mergebot_poller.create_poller(config, pipe)
poller.poll()
except BaseException as exc:
l.error('Poller for {name} crashed with exception {exc}. Please '
'restart and try again'.format(name=config.name, exc=exc))
def start_pollers(configs):
"""start_pollers starts a set of pollers for specified configurations.
Args:
configs: Configurations for the pollers.
Returns:
Array of poller info (process, comm pipe).
"""
pollers = []
for config in configs:
parent_pipe, child_pipe = Pipe()
p = Process(target=poll_scm, args=(config, child_pipe,))
pollers.append(PollerInfo(
process=p,
pipe=parent_pipe))
l.info('Starting poller for {}.'.format(config.name))
p.start()
return pollers
if __name__ == '__main__':
main()
|
tk_raw_analy_ver0.6.py
|
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # check the file size
    inH = inW = int(math.sqrt(fsize)) # determine the input memory size (important!)
inImage = []; tmpList = []
    for i in range(inH) : # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'rb') # open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
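# Editorial helper sketch (not part of the original tool): writes the kind of
# square, headerless, 1-byte-per-pixel grayscale RAW file that loadImage()
# expects. The file name below is a placeholder.
def writeSampleRaw(fname='sample_64x64.raw', size=64) :
    with open(fname, 'wb') as fp :
        for i in range(size) :
            for k in range(size) :
                fp.write(bytes([(i + k) % 256]))  # simple gradient pattern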
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input memory
    equal() # input memory --> output memory
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # tear down the existing canvas, if any.
if canvas != None :
canvas.destroy()
    # prepare the window (fixed size)
window.geometry(str(outH) + 'x' + str(outW))
canvas = Canvas(window, width=outW, height=outH)
paper = PhotoImage(width=outW, height=outH)
canvas.create_image((outW/2, outH/2), image=paper, state='normal')
    # draw to the screen
def putPixel() :
for i in range(0, outH) :
for k in range(0, outW) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data), (k,i))
threading.Thread(target=putPixel).start()
canvas.pack()
def equal() : # identity (copy) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! determine the size of the output memory
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image-processing algorithm goes here
    ############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brighten algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! determine the size of the output memory
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image-processing algorithm goes here
    ############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # average value of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! determine the size of the output memory
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image-processing algorithm goes here
    ############################
for i in range(inH) :
for k in range(inW) :
            outImage[outH-1-i][k] = inImage[i][k]
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the pan start position
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # pan (translate) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
    # important! determine the size of the output memory
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image-processing algorithm goes here
    ############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! determine the size of the output memory
scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
    for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image-processing algorithm goes here
    ############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outH):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
    inH = inW = int(math.sqrt(fsize)) # determine the input memory size (important!)
inImage = []; tmpList = []
    for i in range(inH) : # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'r') # open the file (text mode)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    loadCSV(filename) # file --> input memory
    equal() # input memory --> output memory
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for issuing queries)
    # build the list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
    con.close() # close the database connection
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for issuing queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another function (a local function) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
              password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for issuing queries)
    # build the list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
    con.close() # close the database connection
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
              password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for issuing queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another function (a local function) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0) # about 0.34 or so
for r in range(outH):
        ws.set_row(r, 9.5) # about 0.35 or so
for rowNum in range(outW) :
for colNum in range(outH) :
data = outImage[rowNum][colNum]
            # use the data value to set the cell background color (#000000~#FFFFFF)
if data > 15 :
hexStr = '#' + (hex(data)[2:])*3
else :
hexStr = '#' + ('0' + hex(data)[2:]) * 3
            # prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
## Main code
window = Tk(); window.geometry('200x200');
window.title('영상 처리&데이터 분석 Ver 0.5')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
|
base.py
|
import collections.abc
import datetime
import logging
import logging.handlers
import numpy as np
import os
import pandas as pd
import random
import re
import string
import sys
import threading
import time
import traceback
import warnings
import yaml
from abc import ABC, abstractmethod
from enum import Enum
from glob import glob
from inspect import signature
from io import TextIOBase, StringIO, SEEK_END
from itertools import product
from logging import Logger
from matplotlib.figure import Figure
from numpy import ndarray
from pandas import DataFrame
from ray.util.multiprocessing import Pool
from ray.util.queue import Queue
from shutil import copyfileobj
from tqdm.auto import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
get_origin,
get_args,
overload,
Union,
)
class PropertyTag(property):
domain: Optional[Iterable] = None
@classmethod
def get_properties(cls: Type[property], target: object) -> Dict[str, property]:
res: Dict[str, property] = {}
for name in dir(target):
if hasattr(target, name):
member = getattr(target, name)
if isinstance(member, cls):
res[name] = member
return res
class attribute(PropertyTag):
def __init__(
self,
fget: Optional[Callable[[Any], Any]] = None,
fset: Optional[Callable[[Any, Any], None]] = None,
fdel: Optional[Callable[[Any], None]] = None,
doc: Optional[str] = None,
domain: Optional[Iterable] = None,
) -> None:
super().__init__(fget, fset, fdel, doc)
self.domain = domain
def __call__(
self,
fget: Optional[Callable[[Any], Any]] = None,
fset: Optional[Callable[[Any, Any], None]] = None,
fdel: Optional[Callable[[Any], None]] = None,
doc: Optional[str] = None,
) -> "attribute":
if fget is None:
fget = self.fget
if fset is None:
fset = self.fset
if fdel is None:
fdel = self.fdel
if doc is None:
doc = self.__doc__
return type(self)(fget, fset, fdel, doc, self.domain)
__isattribute__ = True
class result(PropertyTag):
__isresult__ = True
def extract_simpletype(target: object) -> type:
pass
def extract_enumtype(target: object) -> Optional[Type[Enum]]:
if isinstance(target, type) and issubclass(target, Enum):
return target
else:
origin = get_origin(target)
if origin is not None:
for arg in get_args(target):
argtype = extract_enumtype(arg)
if isinstance(argtype, type) and issubclass(argtype, Enum):
return argtype
return None
def get_property_and_getter(target: object, name: str) -> Tuple[property, Callable]:
member = getattr(target, name)
if not isinstance(member, property):
raise ValueError("The specified member '%s' is not a property." % name)
if member.fget is None:
raise ValueError("The specified member '%s' does not have a getter." % name)
return member, member.fget
def get_property_type(target: object, name: str) -> Optional[Type]:
_, getter = get_property_and_getter(target, name)
sign = signature(getter)
a = sign.return_annotation
if get_origin(a) in [collections.abc.Sequence, collections.abc.Iterable, list, set] and len(get_args(a)) > 0:
a = get_args(a)[0]
if get_origin(a) == Union:
a = next(x for x in get_args(a) if x is not None)
return a if isinstance(a, type) else None
def get_property_domain(target: object, name: str) -> List[Any]:
prop, getter = get_property_and_getter(target, name)
sign = signature(getter)
enum = extract_enumtype(sign.return_annotation)
if isinstance(prop, PropertyTag) and prop.domain is not None:
return list(prop.domain)
elif sign.return_annotation is bool:
return [False, True]
elif enum is None:
return [None]
else:
return list(enum.__members__.values())
def get_property_helpstring(target: object, name: str) -> Optional[str]:
prop, getter = get_property_and_getter(target, name)
return getter.__doc__
def get_property_default(target: object, name: str) -> Optional[Any]:
member = getattr(target, "__init__")
sign = signature(member)
param = sign.parameters.get(name, None)
if param is not None and param.default is not param.empty:
return param.default
else:
return None
def get_property_value(target: object, name: str) -> Any:
member = getattr(target, name)
if isinstance(target, type):
if isinstance(member, property) and member.fget is not None:
return member.fget(target)
else:
return None
else:
return member
def get_property_isiterable(target: object, name: str) -> bool:
_, getter = get_property_and_getter(target, name)
sign = signature(getter)
a = sign.return_annotation
return get_origin(a) in [collections.abc.Sequence, collections.abc.Iterable, list, set]
def has_attribute_value(target: object, name: str, value: Any, ignore_none: bool = True) -> bool:
target_value = get_property_value(target, name)
if not isinstance(value, Iterable):
value = [value]
if ignore_none:
return target_value is None or value == [None] or target_value in value
else:
return target_value in value
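# Editorial usage sketch (not part of the original module): how the attribute
# tag and the property-introspection helpers above fit together. The Example
# class, its attribute and the returned values are made up for demonstration.
def _example_property_introspection() -> Tuple[Dict[str, property], List[Any], Optional[Any]]:
    class Example:
        def __init__(self, mode: str = "fast") -> None:
            self._mode = mode
        @attribute(domain=["fast", "slow"])
        def mode(self) -> str:
            """Execution mode of the example."""
            return self._mode
    props = attribute.get_properties(Example)  # {"mode": <attribute ...>}
    domain = get_property_domain(Example, "mode")  # ["fast", "slow"]
    default = get_property_default(Example, "mode")  # "fast"
    return props, domain, default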
def save_dict(source: Dict[str, Any], dirpath: str, basename: str) -> None:
basedict: Dict[str, Any] = dict((k, v) for (k, v) in source.items() if type(v) in [int, float, bool, str])
basedict.update(dict((k, v.value) for (k, v) in source.items() if isinstance(v, Enum)))
if len(basedict) > 0:
with open(os.path.join(dirpath, ".".join([basename, "yaml"])), "w") as f:
yaml.safe_dump(basedict, f)
for name, data in source.items():
if data is None:
continue
if name not in basedict:
if isinstance(data, ndarray):
filename = os.path.join(dirpath, ".".join([basename, name, "npy"]))
np.save(filename, data)
elif isinstance(data, DataFrame):
filename = os.path.join(dirpath, ".".join([basename, name, "csv"]))
data.to_csv(filename)
elif isinstance(data, dict):
filename = os.path.join(dirpath, ".".join([basename, name, "yaml"]))
with open(filename, "w") as f:
yaml.safe_dump(data, f)
elif isinstance(data, Figure):
extra_artists = tuple(data.legends) + tuple(data.texts)
filename = os.path.join(dirpath, ".".join([basename, name, "pdf"]))
data.savefig(fname=filename, bbox_extra_artists=extra_artists, bbox_inches="tight")
filename = os.path.join(dirpath, ".".join([basename, name, "png"]))
data.savefig(fname=filename, bbox_extra_artists=extra_artists, bbox_inches="tight")
else:
raise ValueError("Key '%s' has unsupported type '%s'." % (name, str(type(data))))
def load_dict(dirpath: str, basename: str) -> Dict[str, Any]:
if not os.path.isdir(dirpath):
raise ValueError("The provided path '%s' does not point to a directory." % dirpath)
res: Dict[str, Any] = {}
filename = os.path.join(dirpath, ".".join([basename, "yaml"]))
if os.path.isfile(filename):
with open(filename, "r") as f:
res.update(yaml.safe_load(f))
for path in glob(os.path.join(dirpath, basename) + "*"):
filename = os.path.basename(path)
base, ext = os.path.splitext(filename)
name = base[len(basename) + 1 :] # noqa: E203
if name == "":
continue
if ext == ".npy":
res[name] = np.load(path)
elif ext == ".csv":
res[name] = pd.read_csv(path)
elif ext == ".yaml":
with open(path) as f:
res[name] = yaml.safe_load(f)
else:
warnings.warn("File '%s' with unsupported extension '%s' will be ignored." % (path, ext))
return res
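# Editorial usage sketch (not part of the original module): a save_dict /
# load_dict round trip. Scalars go into a YAML file, arrays and data frames
# into sidecar files. The directory and key names below are placeholders.
def _example_save_and_load(dirpath: str = "/tmp/example-results") -> Dict[str, Any]:
    os.makedirs(dirpath, exist_ok=True)
    source: Dict[str, Any] = {
        "accuracy": 0.93,
        "scores": np.arange(4),
        "table": pd.DataFrame({"x": [1, 2], "y": [3, 4]}),
    }
    save_dict(source, dirpath, basename="run-001")
    return load_dict(dirpath, basename="run-001")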
class Progress:
class Event:
class Type(Enum):
START = "start"
UPDATE = "update"
CLOSE = "close"
def __init__(self, type: "Progress.Event.Type", id: str, **kwargs: Any) -> None:
self.type = type
self.id = id
self.kwargs = kwargs
pbars: Dict[str, tqdm] = {}
def __init__(self, queue: Optional[Queue] = None, id: Optional[str] = None) -> None:
self._queue = queue
self._id = id if id is not None else "".join(random.choices(string.ascii_lowercase + string.digits, k=10))
def start(self, total: Optional[int] = None, desc: Optional[str] = None) -> None:
self._submit(Progress.Event(Progress.Event.Type.START, self._id, total=total, desc=desc))
def update(self, n: int = 1) -> None:
self._submit(Progress.Event(Progress.Event.Type.UPDATE, self._id, n=n))
def close(self) -> None:
self._submit(Progress.Event(Progress.Event.Type.CLOSE, self._id))
def _submit(self, event: "Progress.Event") -> None:
if self._queue is None:
self.handle(event)
else:
self._queue.put(event)
def new(self, id: Optional[str] = None) -> "Progress":
return Progress(self._queue, id)
@property
def queue(self) -> Optional[Queue]:
return self._queue
@queue.setter
def queue(self, value: Optional[Queue]) -> None:
self._queue = value
@classmethod
def refresh(cls: Type["Progress"]) -> None:
for pbar in cls.pbars.values():
pbar.refresh()
@classmethod
def handle(cls: Type["Progress"], event: "Progress.Event") -> None:
if event.type == Progress.Event.Type.START:
cls.pbars[event.id] = tqdm(desc=event.kwargs["desc"], total=event.kwargs["total"])
elif event.type == Progress.Event.Type.UPDATE:
cls.pbars[event.id].update(event.kwargs["n"])
elif event.type == Progress.Event.Type.CLOSE:
cls.pbars[event.id].close()
del cls.pbars[event.id]
# Ensure that bars get redrawn properly after they reshuffle due to closure of one of them.
cls.refresh()
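# Hedged usage sketch (illustrative only; not executed at import time). With no
# queue attached, Progress events are handled in-process and drive a tqdm bar
# directly; when a queue is attached, the same events are put on the queue for a
# monitor thread to hand to Progress.handle (see Study._status_monitor below).
def _example_progress(total: int = 5) -> None:
    progress = Progress()  # no queue: events are handled locally
    progress.start(total=total, desc="demo")
    for _ in range(total):
        progress.update(1)
    progress.close()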
class Scenario(ABC):
scenarios: Dict[str, Type["Scenario"]] = {}
scenario_domains: Dict[str, Dict[str, Set[Any]]] = {}
attribute_domains: Dict[str, Set[Any]] = {}
attribute_helpstrings: Dict[str, Optional[str]] = {}
attribute_types: Dict[str, Optional[type]] = {}
attribute_defaults: Dict[str, Optional[Any]] = {}
attribute_isiterable: Dict[str, bool] = {}
_scenario: Optional[str] = None
def __init__(self, id: Optional[str] = None, logstream: Optional[TextIOBase] = None, **kwargs: Any) -> None:
super().__init__()
self._id = id if id is not None else "".join(random.choices(string.ascii_lowercase + string.digits, k=10))
self._logstream = logstream if logstream is not None else StringIO()
self._progress = Progress(id=self._id)
self._attributes: Optional[Dict[str, Any]] = None
def __init_subclass__(cls: Type["Scenario"], id: Optional[str] = None) -> None:
if id is None:
return
# Register scenario under the given name.
cls._scenario = id
Scenario.scenarios[id] = cls
assert isinstance(Scenario.scenario, PropertyTag)
assert isinstance(Scenario.scenario.domain, set)
Scenario.scenario.domain.add(id)
# Extract domain of scenario.
props = attribute.get_properties(cls)
domain = dict((name, set(get_property_domain(cls, name))) for name in props.keys())
Scenario.scenario_domains[id] = domain
# Include the new domain into the total domain.
for name, values in domain.items():
Scenario.attribute_domains.setdefault(name, set()).update(values)
# Extract types of the scenario attributes.
types = dict((name, get_property_type(cls, name)) for name in props.keys())
Scenario.attribute_types.update(types)
# Extract helpstrings of the scenario attributes.
helpstrings = dict((name, get_property_helpstring(cls, name)) for name in props.keys())
Scenario.attribute_helpstrings.update(helpstrings)
        # Extract default values of the scenario attributes.
defaults = dict((name, get_property_default(cls, name)) for name in props.keys())
Scenario.attribute_defaults.update(defaults)
# Set all attributes to be iterable when passed to get_instances.
isiterable = dict((k, True) for k in props.keys())
Scenario.attribute_isiterable.update(isiterable)
def run(self, progress_bar: bool = True, console_log: bool = True) -> None:
# Set up logging.
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
fh = logging.StreamHandler(self._logstream)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
ch: Optional[logging.Handler] = None
if console_log:
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.logger.info("Run started for scenario: %s" % str(self))
timestart = time.time()
self._run(progress_bar=progress_bar)
duration = datetime.timedelta(seconds=int(time.time() - timestart))
self.logger.info("Run completed. Duration: %s" % str(duration))
# Clear logging handlers.
self.logger.removeHandler(fh)
if ch is not None:
self.logger.removeHandler(ch)
@abstractmethod
def _run(self, progress_bar: bool = True, **kwargs: Any) -> None:
raise NotImplementedError()
@property
@abstractmethod
def completed(self) -> bool:
raise NotImplementedError()
@property
@abstractmethod
def dataframe(self) -> DataFrame:
raise NotImplementedError()
@attribute(domain=set())
def scenario(self) -> str:
"""Type of scenario."""
if self._scenario is None:
raise ValueError("Cannot call this on an abstract class instance.")
return self._scenario
@attribute
def id(self) -> str:
"""A unique identifier of the scenario."""
return self._id
@property
def progress(self) -> Progress:
return self._progress
@property
def logger(self) -> Logger:
return logging.getLogger(self._id)
@property
def logstream(self) -> Optional[TextIOBase]:
return self._logstream
@property
def log(self) -> str:
        self._logstream.seek(0)
        result = self._logstream.read()
        self._logstream.seek(0, SEEK_END)
        return result
@property
def attributes(self) -> Dict[str, Any]:
if self._attributes is None:
props = attribute.get_properties(self.__class__)
self._attributes = dict((name, get_property_value(self, name)) for name in props.keys())
return self._attributes
@property
def keyword_replacements(self) -> Dict[str, str]:
return {}
@classmethod
def get_instances(cls, **kwargs: Any) -> Iterable["Scenario"]:
if cls == Scenario:
for id, scenario in Scenario.scenarios.items():
if kwargs.get("scenario", None) is None or id in kwargs["scenario"]:
for instance in scenario.get_instances(**kwargs):
yield instance
else:
domains = []
names = Scenario.attribute_domains.keys()
for name in names:
if name in kwargs and kwargs[name] is not None:
domain = kwargs[name]
                    if isinstance(domain, str) or not isinstance(domain, Iterable):
                        domain = [domain]
domains.append(list(domain))
else:
domains.append(list(Scenario.attribute_domains[name]))
for values in product(*domains):
attributes = dict((name, value) for (name, value) in zip(names, values) if value is not None)
if cls.is_valid_config(**attributes):
yield cls(**attributes)
@classmethod
def is_valid_config(cls, **attributes: Any) -> bool:
return True
def save(self, path: str) -> None:
if os.path.splitext(path)[1] != "":
raise ValueError("The provided path '%s' is not a valid directory path." % path)
os.makedirs(path, exist_ok=True)
# Write a log file.
logpath = os.path.join(path, "scenario.log")
with open(logpath, "w") as f:
self._logstream.seek(0)
copyfileobj(self._logstream, f)
self._logstream.seek(0, SEEK_END)
# Save attributes as a single yaml file.
props = attribute.get_properties(type(self))
attributes = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
save_dict(attributes, path, "attributes")
# Save results as separate files.
props = result.get_properties(type(self))
results = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
save_dict(results, path, "results")
def __str__(self) -> str:
props = attribute.get_properties(type(self))
attributes = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
for k, v in attributes.items():
if isinstance(v, Enum):
attributes[k] = v.value
return "(%s)" % ", ".join(["%s=%s" % (str(k), str(v)) for (k, v) in attributes.items()])
@classmethod
def from_dict(cls, source: Dict[str, Any]) -> "Scenario":
scenario_id = source["scenario"]
scenario_cls = cls.scenarios[scenario_id]
return scenario_cls(**source)
@classmethod
def load(cls, path: str) -> "Scenario":
if not os.path.isdir(path):
raise ValueError("The provided path '%s' does not point to a directory." % path)
# Load the log file.
logpath = os.path.join(path, "scenario.log")
logstream: Optional[TextIOBase] = None
if os.path.isfile(logpath):
with open(logpath, "r") as f:
logstream = StringIO(f.read())
attributes = load_dict(path, "attributes")
results = load_dict(path, "results")
kwargs = {"logstream": logstream}
return cls.from_dict({**attributes, **results, **kwargs})
def is_match(self, other: "Scenario") -> bool:
other_attributes = other.attributes
return all(other_attributes.get(k, None) == v for (k, v) in self.attributes.items() if k != "id")
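# Hedged sketch of the persistence round trip (hypothetical directory; nothing
# runs at import time). Scenario.save writes "scenario.log" plus attribute and
# result files; Scenario.load reads them back and dispatches to the registered
# subclass via the "scenario" attribute recorded in attributes.yaml.
def _example_scenario_roundtrip(scenario: "Scenario", dirpath: str) -> "Scenario":
    scenario.save(dirpath)
    return Scenario.load(dirpath)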
V = TypeVar("V")
def get_value(obj: Any, key: str) -> Any:
if hasattr(obj, key):
return getattr(obj, key)
else:
return None
class Table(Sequence[V]):
def __init__(self, data: Sequence[V], attributes: List[str] = [], key: Optional[str] = None):
self._data = data
self._attributes = attributes
self._key = key
    @overload
    def __getitem__(self, index: int) -> V:
        ...
    @overload
    def __getitem__(self, index: slice) -> Sequence[V]:
        ...
    def __getitem__(self, index: Union[int, slice]) -> Union[V, Sequence[V]]:
        return self._data.__getitem__(index)
def __len__(self) -> int:
return self._data.__len__()
@property
def df(self):
df = pd.DataFrame.from_dict({a: [get_value(x, a) for x in self._data] for a in self._attributes})
if self._key is not None:
df.set_index(self._key, inplace=True)
return df
def __repr__(self) -> str:
return self.df.__repr__()
def _repr_html_(self) -> Optional[str]:
return self.df._repr_html_()
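# Hedged usage sketch for Table (hypothetical rows; not executed at import
# time). The listed attribute names are read off each element with get_value
# and become DataFrame columns; the optional key becomes the index.
def _example_table() -> "Table[Any]":
    from types import SimpleNamespace
    rows = [SimpleNamespace(id="a", score=0.1), SimpleNamespace(id="b", score=0.2)]
    table = Table(rows, attributes=["id", "score"], key="id")
    _ = table.df  # two-row DataFrame indexed by "id"
    return table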
DEFAULT_RESULTS_PATH = os.path.join("var", "results")
DEFAULT_REPORTS_PATH = os.path.join("var", "reports")
ALL_STUDY_PATHS = glob(os.path.join(DEFAULT_RESULTS_PATH, "*"))
DEFAULT_STUDY_PATH = max(ALL_STUDY_PATHS, key=lambda x: os.path.getmtime(x)) if len(ALL_STUDY_PATHS) > 0 else None
DEFAULT_SCENARIO_PATH_FORMAT = "{id}"
class Study:
def __init__(
self,
scenarios: Sequence[Scenario],
id: Optional[str] = None,
outpath: str = DEFAULT_RESULTS_PATH,
scenario_path_format: str = DEFAULT_SCENARIO_PATH_FORMAT,
logstream: Optional[TextIOBase] = None,
) -> None:
self._scenarios = scenarios
self._id = id if id is not None else datetime.datetime.now().strftime("Study-%Y-%m-%d-%H-%M-%S")
self._outpath = outpath
self._scenario_path_format = scenario_path_format
self._logstream = logstream if logstream is not None else StringIO()
self._logger = logging.getLogger(self._id)
self._verify_scenario_path(scenario_path_format, scenarios)
@staticmethod
def _get_scenario_runner(
queue: Optional[Queue] = None,
catch_exceptions: bool = True,
progress_bar: bool = True,
console_log: bool = True,
rerun: bool = False,
) -> Callable[[Scenario], Scenario]:
def _scenario_runner(scenario: Scenario) -> Scenario:
try:
scenario.progress.queue = queue
scenario.logger.setLevel(logging.DEBUG)
qh: Optional[logging.Handler] = None
ch: Optional[logging.Handler] = None
if queue is not None:
qh = logging.handlers.QueueHandler(queue) # type: ignore
scenario.logger.addHandler(qh)
elif console_log:
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s")
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
scenario.logger.addHandler(ch)
if rerun or not scenario.completed:
if queue is not None:
scenario.run(progress_bar=progress_bar, console_log=False)
else:
with logging_redirect_tqdm(loggers=[scenario.logger]):
scenario.run(progress_bar=progress_bar, console_log=False)
else:
scenario.logger.info("Scenario instance already completed. Skipping...")
except Exception as e:
if catch_exceptions:
trace_output = traceback.format_exc()
scenario.logger.error(trace_output)
else:
raise e
finally:
scenario.progress.queue = None
if qh is not None:
scenario.logger.removeHandler(qh)
if ch is not None:
scenario.logger.removeHandler(ch)
return scenario
return _scenario_runner
@staticmethod
def _status_monitor(queue: Queue, logger: Logger) -> None:
while True:
record: Optional[Union[logging.LogRecord, Progress.Event]] = queue.get()
if record is None:
break
if isinstance(record, Progress.Event):
Progress.handle(record)
else:
# logger = logging.getLogger(record.name)
logger.handle(record)
Progress.refresh()
def run(
self,
catch_exceptions: bool = True,
progress_bar: bool = True,
console_log: bool = True,
parallel: bool = True,
ray_address: Optional[str] = None,
ray_numprocs: Optional[int] = None,
eagersave: bool = True,
**kwargs: Any
) -> None:
# Set up logging.
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s")
fh = logging.StreamHandler(self._logstream)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
ch: Optional[logging.Handler] = None
if console_log:
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set up progress bar.
queue = Queue() if parallel else None
pbar = None if not progress_bar else Progress(queue, id=self.id)
if pbar is not None:
pbar.start(total=len(self.scenarios), desc="Scenarios")
with logging_redirect_tqdm(loggers=[self.logger]):
scenarios = []
runner = Study._get_scenario_runner(queue, catch_exceptions, progress_bar, console_log)
if parallel:
monitor = threading.Thread(target=Study._status_monitor, args=(queue, self.logger))
monitor.start()
pool = Pool(processes=ray_numprocs, ray_address=ray_address)
for scenario in pool.imap_unordered(runner, self.scenarios):
scenarios.append(scenario)
if pbar is not None:
pbar.update(1)
if eagersave:
self.save_scenario(scenario)
else:
for scenario in map(runner, self.scenarios):
scenarios.append(scenario)
if pbar is not None:
pbar.update(1)
if eagersave:
self.save_scenario(scenario)
self._scenarios = scenarios
if pbar is not None:
pbar.close()
if parallel:
assert queue is not None and monitor is not None
queue.put(None)
monitor.join()
# Clear logging handlers.
self.logger.removeHandler(fh)
if ch is not None:
self.logger.removeHandler(ch)
@staticmethod
def _verify_scenario_path(scenario_path_format: str, scenarios: Iterable[Scenario]) -> None:
attributes = re.findall(r"\{(.*?)\}", scenario_path_format)
scenario_paths: Set[str] = set()
for scenario in scenarios:
replacements = dict((name, get_property_value(scenario, name)) for name in attributes)
exppath = scenario_path_format.format_map(replacements)
if exppath in scenario_paths:
raise ValueError(
"Provided scenario_path does not produce unique paths. The path '%s' caused a conflict." % exppath
)
scenario_paths.add(exppath)
def save_scenario(self, scenario: Scenario) -> str:
if self.path is None:
raise ValueError("This scenario has no output path.")
attributes = re.findall(r"\{(.*?)\}", self._scenario_path_format)
replacements = dict((name, get_property_value(scenario, name)) for name in attributes)
exppath = self._scenario_path_format.format_map(replacements)
full_exppath = os.path.join(self.path, "scenarios", exppath)
scenario.save(full_exppath)
return full_exppath
def save(self, save_scenarios: bool = True) -> None:
# Make directory that will contain the study.
os.makedirs(self.path, exist_ok=True)
# Write a marker file.
markerpath = os.path.join(self.path, "study.yml")
with open(markerpath, "w") as f:
yaml.safe_dump({"id": self.id, "scenario_path_format": self.scenario_path_format}, f)
# Write a log file.
logpath = os.path.join(self.path, "study.log")
with open(logpath, "w") as f:
self._logstream.seek(0)
copyfileobj(self._logstream, f)
self._logstream.seek(0, SEEK_END)
        # Optionally save each scenario under its computed path (uniqueness of
        # the paths is checked by _verify_scenario_path in the constructor).
        if save_scenarios:
            for scenario in self.scenarios:
                self.save_scenario(scenario)
@classmethod
def load(cls, path: str, id: Optional[str] = None) -> "Study":
outpath = path
if id is None:
id = os.path.basename(path)
outpath = os.path.dirname(path)
else:
path = os.path.join(path, id)
# Load the marker file.
markerpath = os.path.join(path, "study.yml")
with open(markerpath) as f:
metadata = yaml.safe_load(f)
if metadata["id"] != id:
raise ValueError(
"ID mismatch between the provided '%s' and the encountered '%s'." % (id, metadata["id"])
)
# Load the log file.
logpath = os.path.join(path, "study.log")
logstream: Optional[TextIOBase] = None
if os.path.isfile(logpath):
with open(logpath, "r") as f:
logstream = StringIO(f.read())
# Load all scenarios.
scenarios: List[Scenario] = []
for attpath in glob(os.path.join(path, "**/attributes.yaml"), recursive=True):
exppath = os.path.dirname(attpath)
scenario = Scenario.load(exppath)
scenarios.append(scenario)
# Reconstruct study and return it.
result = Study(
scenarios=scenarios,
id=id,
outpath=outpath,
scenario_path_format=metadata["scenario_path_format"],
logstream=logstream,
)
return result
@classmethod
def isstudy(cls, path: str) -> bool:
return os.path.isfile(os.path.join(path, "study.yml"))
@property
def id(self) -> str:
return self._id
@property
def path(self) -> str:
return os.path.join(self._outpath, self._id)
@property
def scenario_path_format(self) -> str:
return self._scenario_path_format
@property
def logger(self) -> Logger:
return self._logger
@property
def logstream(self) -> Optional[TextIOBase]:
return self._logstream
@property
def log(self) -> str:
        self._logstream.seek(0)
        result = self._logstream.read()
        self._logstream.seek(0, SEEK_END)
        return result
@property
def completed(self) -> bool:
return all(exp.completed for exp in self.scenarios if isinstance(exp, Scenario))
@property
def scenarios(self) -> Table[Scenario]:
attributes = list(Scenario.attribute_domains.keys())
return Table[Scenario](self._scenarios, attributes, "id")
@property
def dataframe(self) -> DataFrame:
df = pd.concat([scenario.dataframe for scenario in self.scenarios], ignore_index=True)
df.sort_index(inplace=True)
return df
def get_scenarios(self, **attributes: Dict[str, Any]) -> Sequence[Scenario]:
res: List[Scenario] = []
for exp in self.scenarios:
if all(has_attribute_value(exp, name, value) for (name, value) in attributes.items() if hasattr(exp, name)):
res.append(exp)
return res
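# Hedged sketch of a typical Study flow (hypothetical scenario selection; not
# executed at import time). Scenarios are expanded via Scenario.get_instances,
# run serially here (parallel=False skips the pool and monitor thread), saved
# eagerly under <outpath>/<study id>/scenarios/..., and the study marker and
# log are written by save().
def _example_run_study(**scenario_kwargs: Any) -> "Study":
    scenarios = list(Scenario.get_instances(**scenario_kwargs))
    study = Study(scenarios, scenario_path_format="{scenario}/{id}")
    study.run(parallel=False, progress_bar=False, console_log=False)
    study.save(save_scenarios=False)  # scenario results were already saved by run()
    return study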
def represent(x: Any):
if isinstance(x, Enum):
return repr(x.value)
else:
return repr(x)
class Report(ABC):
reports: Dict[str, Type["Report"]] = {}
report_domains: Dict[str, Dict[str, Set[Any]]] = {}
attribute_domains: Dict[str, Set[Any]] = {}
attribute_helpstrings: Dict[str, Optional[str]] = {}
attribute_types: Dict[str, Optional[type]] = {}
attribute_defaults: Dict[str, Optional[Any]] = {}
attribute_isiterable: Dict[str, bool] = {}
_report: Optional[str] = None
def __init__(
self, study: Study, id: Optional[str] = None, groupby: Optional[Dict[str, Any]] = None, **kwargs: Any
) -> None:
super().__init__()
self._study = study
self._id = id if id is not None else "".join(random.choices(string.ascii_lowercase + string.digits, k=10))
self._groupby: Dict[str, Any] = {} if groupby is None else groupby
def __init_subclass__(cls: Type["Report"], id: str) -> None:
cls._report = id
cls.reports[id] = cls
# Extract domain of scenario.
props = attribute.get_properties(cls)
domain = dict((name, set(get_property_domain(cls, name))) for name in props.keys())
Report.report_domains[id] = domain
# Include the new domain into the total domain.
for name, values in domain.items():
Report.attribute_domains.setdefault(name, set()).update(values)
# Extract types of the scenario attributes.
types = dict((name, get_property_type(cls, name)) for name in props.keys())
Report.attribute_types.update(types)
# Extract helpstrings of the scenario attributes.
helpstrings = dict((name, get_property_helpstring(cls, name)) for name in props.keys())
Report.attribute_helpstrings.update(helpstrings)
        # Extract default values of the scenario attributes.
defaults = dict((name, get_property_default(cls, name)) for name in props.keys())
Report.attribute_defaults.update(defaults)
# Specify if attributes should be iterable when passed to get_instances.
Report.attribute_isiterable = dict((name, get_property_isiterable(cls, name)) for name in props.keys())
Report.attribute_isiterable["report"] = True # We hard code that we allow multiple report argument values.
@attribute
def report(self) -> str:
if self._report is None:
raise ValueError("Cannot call this on an abstract class instance.")
return self._report
@attribute
def id(self) -> str:
"""A unique identifier of the report."""
return self._id
@property
def groupby(self) -> Dict[str, Any]:
return self._groupby
@property
def study(self) -> Study:
return self._study
@classmethod
def is_valid_config(cls, **attributes: Any) -> bool:
return True
@abstractmethod
def generate(self) -> None:
raise NotImplementedError()
def save(
self, path: Optional[str] = None, use_groupby: bool = True, use_id: bool = False, use_subdirs: bool = False
) -> None:
if path is None:
path = os.path.join(self._study.path, "reports")
basename = "report"
if use_subdirs:
if use_groupby:
groupby = [self._groupby[key] for key in sorted(self.groupby.keys())]
path = os.path.join(path, *groupby)
if use_id:
path = os.path.join(path, self._id)
if os.path.splitext(path)[1] != "":
raise ValueError("The provided path '%s' is not a valid directory path." % path)
else:
if use_groupby:
groupby = [self._groupby[key] for key in sorted(self.groupby.keys())]
basename = "_".join(
[basename]
+ ["%s=%s" % (str(key), represent(self._groupby[key])) for key in sorted(self.groupby.keys())]
)
if use_id:
basename = basename + "_id=" + self._id
os.makedirs(path, exist_ok=True)
# Save results as separate files.
props = result.get_properties(type(self))
results = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
save_dict(results, path, basename)
@classmethod
def get_instances(
cls: Type["Report"], study: Study, groupby: Optional[Sequence[str]], **kwargs: Any
) -> Iterable["Report"]:
if cls == Report:
for id, report in Report.reports.items():
if kwargs.get("report", None) is None or id in kwargs["report"]:
for instance in report.get_instances(study=study, groupby=groupby, **kwargs):
yield instance
else:
# If grouping attributes were not specified, then we return only a single instance.
if groupby is None or len(groupby) == 0:
yield cls(study=study, **kwargs)
else:
# Find distinct grouping attribute assignments.
all_values: List[Tuple] = []
if len(study.scenarios) > 0:
all_values = list(study.dataframe.groupby(groupby).groups.keys())
for values in all_values:
groupby_values = dict((k, v) for (k, v) in zip(groupby, values))
yield cls(study=study, groupby=groupby_values, **kwargs)
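# Hedged sketch of report generation over a completed study (the grouping
# attribute "dataset" is hypothetical; not executed at import time).
# Report.get_instances yields one instance per registered report class and per
# distinct value combination of the groupby columns in the study dataframe.
def _example_generate_reports(study: Study) -> None:
    for report_instance in Report.get_instances(study=study, groupby=["dataset"]):
        report_instance.generate()
        report_instance.save(use_groupby=True)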
|
server.py
|
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
import time
import collections
import tornado.gen
import itertools
from grab.util.py3k_support import *
class ServerState(object):
PORT = 9876
EXTRA_PORT1 = 9877
EXTRA_PORT2 = 9878
BASE_URL = None
REQUEST = {}
RESPONSE = {}
RESPONSE_ONCE = {'headers': []}
SLEEP = {}
TIMEOUT_ITERATOR = None
def reset(self):
self.BASE_URL = 'http://localhost:%d' % self.PORT
self.REQUEST.update({
'args': {},
'headers': {},
'cookies': None,
'path': None,
'method': None,
'charset': 'utf-8',
})
self.RESPONSE.update({
'get': '',
'post': '',
'cookies': None,
'headers': [],
'get_callback': None,
'code': 200,
})
self.RESPONSE_ONCE.update({
'get': None,
'post': None,
'code': None,
'cookies': None,
})
self.SLEEP.update({
'get': 0,
'post': 0,
})
        del self.RESPONSE_ONCE['headers'][:]
SERVER = ServerState()
class MainHandler(tornado.web.RequestHandler):
def decode_argument(self, value, **kwargs):
return value.decode(SERVER.REQUEST['charset'])
@tornado.web.asynchronous
@tornado.gen.engine
def method_handler(self):
method_name = self.request.method.lower()
if SERVER.SLEEP.get(method_name, None):
time.sleep(SERVER.SLEEP[method_name])
SERVER.REQUEST['args'] = {}
for key in self.request.arguments.keys():
SERVER.REQUEST['args'][key] = self.get_argument(key)
SERVER.REQUEST['headers'] = self.request.headers
SERVER.REQUEST['path'] = self.request.path
SERVER.REQUEST['method'] = self.request.method
SERVER.REQUEST['cookies'] = self.request.cookies
charset = SERVER.REQUEST['charset']
SERVER.REQUEST['post'] = self.request.body
callback_name = '%s_callback' % method_name
if SERVER.RESPONSE.get(callback_name) is not None:
SERVER.RESPONSE[callback_name](self)
else:
headers_sent = set()
if SERVER.RESPONSE_ONCE['code']:
self.set_status(SERVER.RESPONSE_ONCE['code'])
SERVER.RESPONSE_ONCE['code'] = None
else:
self.set_status(SERVER.RESPONSE['code'])
if SERVER.RESPONSE_ONCE['cookies']:
for name, value in sorted(SERVER.RESPONSE_ONCE['cookies'].items()):
# Set-Cookie: name=newvalue; expires=date; path=/; domain=.example.org.
self.add_header('Set-Cookie', '%s=%s' % (name, value))
SERVER.RESPONSE_ONCE['cookies'] = None
else:
if SERVER.RESPONSE['cookies']:
for name, value in sorted(SERVER.RESPONSE['cookies'].items()):
# Set-Cookie: name=newvalue; expires=date; path=/; domain=.example.org.
self.add_header('Set-Cookie', '%s=%s' % (name, value))
if SERVER.RESPONSE['headers']:
for name, value in SERVER.RESPONSE['headers']:
self.set_header(name, value)
while SERVER.RESPONSE_ONCE['headers']:
key, value = SERVER.RESPONSE_ONCE['headers'].pop()
self.set_header(key, value)
headers_sent.add(key)
self.set_header('Listen-Port', str(self.application._listen_port))
            if 'Content-Type' not in headers_sent:
charset = 'utf-8'
self.set_header('Content-Type', 'text/html; charset=%s' % charset)
headers_sent.add('Content-Type')
if SERVER.RESPONSE_ONCE.get(method_name) is not None:
self.write(SERVER.RESPONSE_ONCE[method_name])
SERVER.RESPONSE_ONCE[method_name] = None
else:
resp = SERVER.RESPONSE.get(method_name, '')
if isinstance(resp, collections.Callable):
self.write(resp())
else:
self.write(resp)
if SERVER.TIMEOUT_ITERATOR:
yield tornado.gen.Task(IOLoop.instance().add_timeout,
time.time() + next(SERVER.TIMEOUT_ITERATOR))
self.finish()
get = method_handler
post = method_handler
put = method_handler
patch = method_handler
delete = method_handler
app1 = tornado.web.Application([
(r"^.*", MainHandler),
])
app1._listen_port = SERVER.PORT
app2 = tornado.web.Application([
(r"^.*", MainHandler),
])
app2._listen_port = SERVER.EXTRA_PORT1
app3 = tornado.web.Application([
(r"^.*", MainHandler),
])
app3._listen_port = SERVER.EXTRA_PORT2
app1.listen(app1._listen_port)
app2.listen(app2._listen_port)
app3.listen(app3._listen_port)
def start_server():
def func():
tornado.ioloop.IOLoop.instance().start()
SERVER.reset()
th = Thread(target=func)
th.daemon = True
th.start()
time.sleep(0.1)
def stop_server():
tornado.ioloop.IOLoop.instance().stop()
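# Hedged usage sketch (not executed on import; the request/assert lines are
# placeholders). A test typically starts the server once, scripts the next
# response via SERVER.RESPONSE / SERVER.RESPONSE_ONCE, issues requests against
# SERVER.BASE_URL with any HTTP client, and then inspects SERVER.REQUEST.
def example_usage():
    start_server()
    SERVER.RESPONSE['get'] = 'hello'
    SERVER.RESPONSE_ONCE['headers'].append(('X-Demo', '1'))
    # ... perform a GET against SERVER.BASE_URL here and assert on the body ...
    # ... then check SERVER.REQUEST['path'], SERVER.REQUEST['headers'], etc. ...
    stop_server()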
|
train.py
|
import argparse, torch, os, sys, time, math, shutil, threading
from ssd import SSD
from imgaug import augmenters as iaa
from dataset.logos.logo_dataset import LogoDataset
"""
MAIN ARGUMENTS
"""
str2bool = lambda x: x.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='SSD plug & play training')
# Architecture settings
parser.add_argument('--architecture', default='300_VGG16', type=str, help='Base architecture on which to build the SSD', choices=list(SSD.ARCHITECTURES.keys()))
parser.add_argument('--pretrained_base', default=True, type=str2bool, help='Use pretrained based architecture')
parser.add_argument('--resume', default=None, type=str, help='Saved state_dict of the model')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda or not')
parser.add_argument('--dataset_name', default="logos", type=str, help='Dataset name')
# Learning settings
parser.add_argument('--init', default=None, type=str, help='Saved state_dict of the model')
parser.add_argument('--lr', default=1e-4, type=float, help='Initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='Momentum value')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay value')
parser.add_argument('--num_workers', default=4, type=int, help='Number of dataloader workers')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size in training')
parser.add_argument('--batch_size_val', default=20, type=int, help='Batch size in validation')
parser.add_argument('--start_epochs', default=1, type=int, help='Start epochs')
parser.add_argument('--epochs', default=200, type=int, help='Epochs')
parser.add_argument('--iterations', default=1000, type=int, help='Iterations in training')
parser.add_argument('--iterations_monitor', default=10, type=int, help='Iterations monitor in training')
parser.add_argument('--iterations_val', default=50, type=int, help='Iterations in validation')
# Visualization settings
parser.add_argument('--tensorboardX', default=True, type=str2bool, help='Use tensorboardX during learning')
parser.add_argument('--log_folder', default='log', type=str, help='Folder to save logs')
# Save models
parser.add_argument('--save_folder', default='checkpoints', type=str, help='Folder to save checkpoints')
parser.add_argument('--save_name', default='model_{architecture}_{epoch}_{dataset_name}.pth.tar', type=str, help='Name of model checkpoint')
parser.add_argument('--save_interval', default=1, type=int, help='Epochs interval to save')
args = parser.parse_args()
# Show summary
print('Summary:')
print('======================================')
for k in args.__dict__:
data = args.__dict__[k]
if k == 'save_name':
data = data.format(architecture=args.architecture, epoch="{epoch}", dataset_name=args.dataset_name)
print('%s: %s' % (k, data))
print('======================================')
"""
CUDA
"""
has_cuda = args.cuda and torch.cuda.is_available()
"""
CHECK HAVE TENSORBOARDX
"""
if args.tensorboardX:
has_tensorboardX = True
try:
import tensorboardX
log_folder = args.log_folder + "/{architecture}_{dataset_name}".format(architecture=args.architecture, dataset_name=args.dataset_name)
train_writer = tensorboardX.SummaryWriter(log_folder + "/training")
if args.iterations_val > 0:
val_writer = tensorboardX.SummaryWriter(log_folder + "/validation")
    except Exception:
has_tensorboardX = False
else:
has_tensorboardX = False
"""
DATA AUGMENTATION
"""
AUGMENTERS = [
# Flip Images left-right
iaa.Fliplr(0.5),
# Change Contrast
iaa.Multiply((1, 1.5)),
    # Apply affine transforms: translation of -20..20 px, scale of 0.5..1.2, rotation of -30..30 deg, shear of -10..10 deg.
iaa.Affine(
translate_px={"x": (-20, 20), "y": (-20, 20)},
scale=(0.5, 1.2),
rotate=(-30, 30),
shear=(-10, 10)
),
    # With probability 0.5, apply exactly one of the following:
iaa.Sometimes(0.5,
iaa.OneOf([
# Apply a contrast normalization
iaa.ContrastNormalization((0.75, 1.5)),
# Add noise
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
# Smooth image
iaa.GaussianBlur(sigma=(0, 3.0))
])
)
]
"""
MODEL
"""
model_net = SSD(architecture=args.architecture, cuda=has_cuda, pretrained=args.pretrained_base, num_classes=len(LogoDataset.CLASSES))
if has_tensorboardX:
    train_writer.add_graph(model_net, torch.rand(1, 3, model_net.image_size, model_net.image_size))
if has_cuda:
net = torch.nn.DataParallel(model_net)
torch.backends.cudnn.benchmark = True
else:
net = model_net
# Load weights
def init(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()
if args.resume is not None:
print('Loading checkpoint / model...')
model_net.load_model(torch.load(args.resume)['model'])
else:
# Init weights of base
if not args.pretrained_base:
print('Init pretrained base...')
model_net.base_net.apply(init)
else:
print('Loading pretrained base...')
# Init the rest of weights
print('Init misc, extras, locations and confidences...')
model_net.apply_only_non_base(init)
"""
DATASET DEFINITION
"""
train_dataset = LogoDataset(root=os.path.dirname(os.path.abspath(__file__)) + '/dataset/logos/data', transform=SSD.Utils.Transform(AUGMENTERS, model_net.image_size))
val_dataset = LogoDataset(root=os.path.dirname(os.path.abspath(__file__)) + '/dataset/logos/data', transform=SSD.Utils.Transform(None, model_net.image_size))
size_train_dataset = int(0.8 * len(train_dataset))
indices = torch.randperm(len(train_dataset)).cpu()
train_dataset, val_dataset = torch.utils.data.dataset.Subset(train_dataset, indices[:size_train_dataset]), torch.utils.data.dataset.Subset(val_dataset, indices[size_train_dataset:])
"""
OPTIMIZER DEFINITION
"""
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
"""
START TRAINING
"""
data_loader_train = torch.utils.data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers, shuffle=True, collate_fn=SSD.Utils.collate_fn, pin_memory=True)
data_loader_val = torch.utils.data.DataLoader(val_dataset, args.batch_size_val, num_workers=args.num_workers, shuffle=True, collate_fn=SSD.Utils.collate_fn, pin_memory=True)
# Main training loop
def train(epoch):
net.train()
batch_iter = iter(data_loader_train)
loss_mean = 0
loss_counter = 0
loss_mean_monitoring = 0
loss_counter_monitoring = 0
time_mean_monitoring = 0
time_counter_monitoring = 0
for iteration in range(args.iterations):
# Load data
try:
images, targets = next(batch_iter)
except StopIteration:
batch_iter = iter(data_loader_train)
images, targets = next(batch_iter)
if has_cuda:
images = images.cuda()
targets = [target.cuda() for target in targets]
t0 = time.time()
optimizer.zero_grad()
loss, _ = model_net.compute_loss(images, targets, net=net)
if not math.isinf(loss):
loss_mean += loss.item()
loss_counter += 1
loss_mean_monitoring += loss.item()
loss_counter_monitoring += 1
loss.backward()
optimizer.step()
time_mean_monitoring += time.time() - t0
time_counter_monitoring += 1
if iteration > 0 and (iteration + 1) % args.iterations_monitor == 0:
print('epoch: %d || iteration: %d || Loss: %.4f ||' % (epoch, iteration + 1, loss_mean_monitoring / loss_counter_monitoring), end=' ')
print('timer: %.4f sec.' % (time_mean_monitoring / args.iterations_monitor))
loss_mean_monitoring = 0
loss_counter_monitoring = 0
time_mean_monitoring = 0
time_counter_monitoring = 0
if loss_counter == 0:
return 0
return loss_mean / loss_counter
def validation(epoch):
net.eval()
with torch.no_grad():
batch_iter = iter(data_loader_val)
loss_mean = 0
loss_counter = 0
t0 = time.time()
for iteration in range(args.iterations_val):
# Load data
try:
images, targets = next(batch_iter)
except StopIteration:
batch_iter = iter(data_loader_val)
images, targets = next(batch_iter)
if has_cuda:
images = images.cuda()
targets = [target.cuda() for target in targets]
loss, _ = model_net.compute_loss(images, targets, net=net)
if not math.isinf(loss):
loss_mean += loss.item()
loss_counter += 1
t1 = time.time()
print('epoch %d || validation || Loss: %.4f ||' % (epoch, loss_mean / args.iterations_val), end=' ')
print('timer: %.4f sec.' % (t1 - t0))
if loss_counter == 0:
return 0
return loss_mean / loss_counter
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
os.makedirs(args.save_folder, exist_ok=True)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_name)
best_val = float('inf')
best_name = args.save_folder + "/" + args.save_name.format(architecture=args.architecture, epoch="best", dataset_name=args.dataset_name)
for epoch in range(args.start_epochs - 1, args.epochs):
epoch_1 = epoch + 1
loss_train = train(epoch_1)
if args.iterations_val > 0:
loss_val = validation(epoch_1)
else:
loss_val = loss_train
if best_val > loss_val:
is_best = True
best_val = loss_val
else:
is_best = False
if has_tensorboardX:
threading.Thread(target= lambda: train_writer.add_scalar('Loss', loss_train, int(epoch_1))).start()
if args.iterations_val > 0:
threading.Thread(target= lambda: val_writer.add_scalar('Loss', loss_val, int(epoch_1))).start()
if epoch_1 % args.save_interval == 0:
save_checkpoint({
'epoch': epoch_1,
'model': model_net.save_model(),
'best_loss': best_val,
}, is_best, args.save_folder + "/" + args.save_name.format(architecture=args.architecture, epoch=epoch_1, dataset_name=args.dataset_name))
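# Hedged sketch of restoring a checkpoint written by save_checkpoint above
# (the path argument is hypothetical; this mirrors the --resume branch near the
# top of the script and is not called during training).
def load_checkpoint_for_eval(checkpoint_path):
    state = torch.load(checkpoint_path)
    model_net.load_model(state['model'])  # 'model' holds model_net.save_model()
    net.eval()
    return state.get('epoch'), state.get('best_loss')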
|
script_request_queue_test.py
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests ScriptRequestQueueTest functionality"""
import time
import unittest
from threading import Thread, Lock
from streamlit.script_request_queue import RerunData
from streamlit.script_request_queue import ScriptRequestQueue
from streamlit.script_runner import ScriptRequest
from streamlit.proto.WidgetStates_pb2 import WidgetStates
from streamlit.widgets import Widgets
def _create_widget(id, states):
states.widgets.add().id = id
return states.widgets[-1]
class ScriptRequestQueueTest(unittest.TestCase):
def test_dequeue(self):
"""Test that we can enqueue and dequeue on different threads"""
queue = ScriptRequestQueue()
# This should return immediately
self.assertEqual((None, None), queue.dequeue())
lock = Lock()
dequeued_evt = [None]
def get_event():
with lock:
return dequeued_evt[0]
def set_event(value):
with lock:
dequeued_evt[0] = value
def do_dequeue():
event = None
while event is None:
event, _ = queue.dequeue()
set_event(event)
thread = Thread(target=do_dequeue, name="test_dequeue")
thread.start()
self.assertIsNone(get_event())
queue.enqueue(ScriptRequest.STOP)
time.sleep(0.1)
self.assertEqual(ScriptRequest.STOP, get_event())
thread.join(timeout=0.25)
self.assertFalse(thread.is_alive())
def test_rerun_data_coalescing(self):
"""Test that multiple RERUN requests get coalesced with
expected values.
(This is similar to widgets_test.test_coalesce_widget_states -
it's testing the same thing, but through the ScriptEventQueue
interface.)
"""
queue = ScriptRequestQueue()
states = WidgetStates()
_create_widget("trigger", states).trigger_value = True
_create_widget("int", states).int_value = 123
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=states))
states = WidgetStates()
_create_widget("trigger", states).trigger_value = False
_create_widget("int", states).int_value = 456
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=states))
event, data = queue.dequeue()
self.assertEqual(event, ScriptRequest.RERUN)
widgets = Widgets()
widgets.set_state(data.widget_states)
# Coalesced triggers should be True if either the old or
# new value was True
self.assertEqual(True, widgets.get_widget_value("trigger"))
# Other widgets should have their newest value
self.assertEqual(456, widgets.get_widget_value("int"))
# We should have no more events
self.assertEqual((None, None), queue.dequeue(), "Expected empty event queue")
# Test that we can coalesce if previous widget state is None
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=None))
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=None))
states = WidgetStates()
_create_widget("int", states).int_value = 789
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=states))
event, data = queue.dequeue()
widgets = Widgets()
widgets.set_state(data.widget_states)
self.assertEqual(event, ScriptRequest.RERUN)
self.assertEqual(789, widgets.get_widget_value("int"))
# We should have no more events
self.assertEqual((None, None), queue.dequeue(), "Expected empty event queue")
# Test that we can coalesce if our *new* widget state is None
states = WidgetStates()
_create_widget("int", states).int_value = 101112
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=states))
queue.enqueue(ScriptRequest.RERUN, RerunData(widget_states=None))
event, data = queue.dequeue()
widgets = Widgets()
widgets.set_state(data.widget_states)
self.assertEqual(event, ScriptRequest.RERUN)
self.assertEqual(101112, widgets.get_widget_value("int"))
# We should have no more events
self.assertEqual((None, None), queue.dequeue(), "Expected empty event queue")
|
session_debug_testlib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.VariableV1(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
return (math_ops.multiply(self._w, input_), state)
@test_util.run_v1_only("b/120545219")
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.compat.v1.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.VariableV1(str1_init, name=str1_name)
str2 = variables.VariableV1(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.VariableV1(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# Verify that the variable is properly initialized by the run() call.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.VariableV1(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.VariableV1(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
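# (A debug watch key appears to have the form <node_name>:<output_slot>:<debug_op>,
# e.g. "while/Identity:0:DebugIdentity", as the assertion below illustrates.)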
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_denylists(
run_options,
sess.graph,
node_name_regex_denylist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.VariableV1(10.0, name="x")
y = variables.VariableV1(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertLessEqual(3, len(bad_data))
node_names = [datum.node_name for datum in bad_data]
self.assertIn(x_name, node_names)
self.assertIn(y_name, node_names)
self.assertIn(z_name, node_names)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
def testFindInfOrNanWithOpNameExclusion(self):
with session.Session() as sess:
u_name = "testFindInfOrNanWithOpNameExclusion/u"
v_name = "testFindInfOrNanWithOpNameExclusion/v"
w_name = "testFindInfOrNanWithOpNameExclusion/w"
x_name = "testFindInfOrNanWithOpNameExclusion/x"
y_name = "testFindInfOrNanWithOpNameExclusion/y"
z_name = "testFindInfOrNanWithOpNameExclusion/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
# Find all "offending tensors".
bad_data = dump.find(debug_data.has_inf_or_nan,
exclude_node_names=".*/x$")
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertLessEqual(2, len(bad_data))
# The node `x` should have been excluded.
node_names = [datum.node_name for datum in bad_data]
self.assertIn(y_name, node_names)
self.assertIn(z_name, node_names)
first_bad_datum = dump.find(
debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
self.assertEqual(1, len(first_bad_datum))
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/device:GPU:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again without supplying the partition graphs, to check
# that no errors are raised; the partition graphs can be loaded from the
# dump directory itself.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variables.VariableV1(1.0, name="v1")
v2 = variables.VariableV1(2.0, name="v2")
v3 = variables.VariableV1(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variables.VariableV1(10.0, name="v")
delta = variables.VariableV1(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError; validation occurs
# only when partition_graphs are loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
# Next, set the two timestamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though the op has multiple
# output slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on v: w = u * v, so dw / dv = u.
# Updated value of v should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.VariableV1(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
file_io.delete_recursively(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
file_io.delete_recursively(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.VariableV1(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
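# For reference, the 15-element DebugNumericSummary vector asserted below can
# be read as follows (inferred from the asserted values, not an authoritative
# spec): [0] is_initialized, [1] element count, [2] NaN count, [3] -inf count,
# [4] finite-negative count, [5] zero count, [6] finite-positive count,
# [7] +inf count, [8] min, [9] max, [10] mean and [11] variance of the finite
# elements, [12] dtype enum, [13] ndims, [14:] dimension sizes.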
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.VariableV1(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.VariableV1("1", name="a")
b = variables.VariableV1("3", name="b")
c = variables.VariableV1("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
self.assertEqual(264, m_result)
# The numeric-dtype Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
self.assertLessEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
file_io.delete_recursively(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertLessEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.VariableV1([10.0, 10.0], name="a")
b = variables.VariableV1([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, tuple)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, tuple)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.VariableV1(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEqual(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increases monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
test_policies.py
# Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from itertools import islice, cycle
from mock import Mock
from random import randint
import six
import sys
import struct
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.metadata import Metadata
from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy,
TokenAwarePolicy, SimpleConvictionPolicy,
HostDistance, ExponentialReconnectionPolicy,
RetryPolicy, WriteType,
DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy,
LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy)
from cassandra.pool import Host
from cassandra.query import Statement
from six.moves import xrange
class LoadBalancingPolicyTest(unittest.TestCase):
def test_non_implemented(self):
"""
Code coverage for interface-style base class
"""
policy = LoadBalancingPolicy()
host = Host("ip1", SimpleConvictionPolicy)
host.set_location_info("dc1", "rack1")
self.assertRaises(NotImplementedError, policy.distance, host)
self.assertRaises(NotImplementedError, policy.populate, None, host)
self.assertRaises(NotImplementedError, policy.make_query_plan)
self.assertRaises(NotImplementedError, policy.on_up, host)
self.assertRaises(NotImplementedError, policy.on_down, host)
self.assertRaises(NotImplementedError, policy.on_add, host)
self.assertRaises(NotImplementedError, policy.on_remove, host)
def test_instance_check(self):
self.assertRaises(TypeError, Cluster, load_balancing_policy=RoundRobinPolicy)
class RoundRobinPolicyTest(unittest.TestCase):
def test_basic(self):
hosts = [0, 1, 2, 3]
policy = RoundRobinPolicy()
policy.populate(None, hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), hosts)
def test_multiple_query_plans(self):
hosts = [0, 1, 2, 3]
policy = RoundRobinPolicy()
policy.populate(None, hosts)
for i in xrange(20):
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), hosts)
def test_single_host(self):
policy = RoundRobinPolicy()
policy.populate(None, [0])
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [0])
def test_status_updates(self):
hosts = [0, 1, 2, 3]
policy = RoundRobinPolicy()
policy.populate(None, hosts)
policy.on_down(0)
policy.on_remove(1)
policy.on_up(4)
policy.on_add(5)
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), [2, 3, 4, 5])
def test_thread_safety(self):
hosts = range(100)
policy = RoundRobinPolicy()
policy.populate(None, hosts)
def check_query_plan():
for i in range(100):
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), hosts)
threads = [Thread(target=check_query_plan) for i in range(4)]
# map() is lazy on Python 3, so start and join the threads explicitly.
for t in threads: t.start()
for t in threads: t.join()
def test_thread_safety_during_modification(self):
hosts = range(100)
policy = RoundRobinPolicy()
policy.populate(None, hosts)
errors = []
def check_query_plan():
try:
for i in xrange(100):
list(policy.make_query_plan())
except Exception as exc:
errors.append(exc)
def host_up():
for i in xrange(1000):
policy.on_up(randint(0, 99))
def host_down():
for i in xrange(1000):
policy.on_down(randint(0, 99))
threads = []
for i in range(5):
threads.append(Thread(target=check_query_plan))
threads.append(Thread(target=host_up))
threads.append(Thread(target=host_down))
# make the GIL switch after every instruction, maximizing
# the chance of race conditions
if six.PY2:
original_interval = sys.getcheckinterval()
else:
original_interval = sys.getswitchinterval()
try:
if six.PY2:
sys.setcheckinterval(0)
else:
sys.setswitchinterval(0.0001)
# Start and join the threads explicitly (map() is lazy on Python 3).
for t in threads: t.start()
for t in threads: t.join()
finally:
if six.PY2:
sys.setcheckinterval(original_interval)
else:
sys.setswitchinterval(original_interval)
if errors:
self.fail("Saw errors: %s" % (errors,))
def test_no_live_nodes(self):
"""
Ensure query plan for a downed cluster will execute without errors
"""
hosts = [0, 1, 2, 3]
policy = RoundRobinPolicy()
policy.populate(None, hosts)
for i in range(4):
policy.on_down(i)
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
class DCAwareRoundRobinPolicyTest(unittest.TestCase):
def test_no_remote(self):
hosts = []
for i in range(4):
h = Host(i, SimpleConvictionPolicy)
h.set_location_info("dc1", "rack1")
hosts.append(h)
policy = DCAwareRoundRobinPolicy("dc1")
policy.populate(None, hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), sorted(hosts))
def test_with_remotes(self):
hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
local_hosts = set(h for h in hosts if h.datacenter == "dc1")
remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
# allow all of the remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), local_hosts)
self.assertEqual(set(qplan[2:]), remote_hosts)
# allow only one of the remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), local_hosts)
used_remotes = set(qplan[2:])
self.assertEqual(1, len(used_remotes))
self.assertIn(qplan[2], remote_hosts)
# allow no remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(2, len(qplan))
self.assertEqual(local_hosts, set(qplan))
def test_get_distance(self):
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
host = Host("ip1", SimpleConvictionPolicy)
host.set_location_info("dc1", "rack1")
policy.populate(Mock(), [host])
self.assertEqual(policy.distance(host), HostDistance.LOCAL)
# used_hosts_per_remote_dc is set to 0, so ignore it
remote_host = Host("ip2", SimpleConvictionPolicy)
remote_host.set_location_info("dc2", "rack1")
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# dc2 isn't registered in the policy's live_hosts dict
policy.used_hosts_per_remote_dc = 1
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# make sure the policy has both dcs registered
policy.populate(Mock(), [host, remote_host])
self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
# since used_hosts_per_remote_dc is set to 1, only the first
# remote host in dc2 will be REMOTE, the rest are IGNORED
second_remote_host = Host("ip3", SimpleConvictionPolicy)
second_remote_host.set_location_info("dc2", "rack1")
policy.populate(Mock(), [host, remote_host, second_remote_host])
distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_status_updates(self):
hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(Mock(), hosts)
policy.on_down(hosts[0])
policy.on_remove(hosts[2])
new_local_host = Host(4, SimpleConvictionPolicy)
new_local_host.set_location_info("dc1", "rack1")
policy.on_up(new_local_host)
new_remote_host = Host(5, SimpleConvictionPolicy)
new_remote_host.set_location_info("dc9000", "rack1")
policy.on_add(new_remote_host)
# we now have two local hosts and two remote hosts in separate dcs
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
# since we have hosts in dc9000, the distance shouldn't be IGNORED
self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
policy.on_down(new_local_host)
policy.on_down(hosts[1])
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
policy.on_down(new_remote_host)
policy.on_down(hosts[3])
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
def test_no_live_nodes(self):
"""
Ensure query plan for a downed cluster will execute without errors
"""
hosts = []
for i in range(4):
h = Host(i, SimpleConvictionPolicy)
h.set_location_info("dc1", "rack1")
hosts.append(h)
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(Mock(), hosts)
for host in hosts:
policy.on_down(host)
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
def test_no_nodes(self):
"""
Ensure query plan for an empty cluster will execute without errors
"""
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(None, [])
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
def test_default_dc(self):
host_local = Host(1, SimpleConvictionPolicy, 'local')
host_remote = Host(2, SimpleConvictionPolicy, 'remote')
host_none = Host(1, SimpleConvictionPolicy)
# contact point is '1'
cluster = Mock(contact_points=[1])
# contact DC first
policy = DCAwareRoundRobinPolicy()
policy.populate(cluster, [host_none])
self.assertFalse(policy.local_dc)
policy.on_add(host_local)
policy.on_add(host_remote)
self.assertNotEqual(policy.local_dc, host_remote.datacenter)
self.assertEqual(policy.local_dc, host_local.datacenter)
# contact DC second
policy = DCAwareRoundRobinPolicy()
policy.populate(cluster, [host_none])
self.assertFalse(policy.local_dc)
policy.on_add(host_remote)
policy.on_add(host_local)
self.assertNotEqual(policy.local_dc, host_remote.datacenter)
self.assertEqual(policy.local_dc, host_local.datacenter)
# no DC
policy = DCAwareRoundRobinPolicy()
policy.populate(cluster, [host_none])
self.assertFalse(policy.local_dc)
policy.on_add(host_none)
self.assertFalse(policy.local_dc)
# only other DC
policy = DCAwareRoundRobinPolicy()
policy.populate(cluster, [host_none])
self.assertFalse(policy.local_dc)
policy.on_add(host_remote)
self.assertFalse(policy.local_dc)
class TokenAwarePolicyTest(unittest.TestCase):
def test_wrap_round_robin(self):
cluster = Mock(spec=Cluster)
cluster.metadata = Mock(spec=Metadata)
hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
for host in hosts:
host.set_up()
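# Stub replica lookup: interpret the packed routing key as an index and
# return the two consecutive hosts starting there (wrapping around the list).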
def get_replicas(keyspace, packed_key):
index = struct.unpack('>i', packed_key)[0]
return list(islice(cycle(hosts), index, index + 2))
cluster.metadata.get_replicas.side_effect = get_replicas
policy = TokenAwarePolicy(RoundRobinPolicy())
policy.populate(cluster, hosts)
for i in range(4):
query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
qplan = list(policy.make_query_plan(None, query))
replicas = get_replicas(None, struct.pack('>i', i))
other = set(h for h in hosts if h not in replicas)
self.assertEqual(replicas, qplan[:2])
self.assertEqual(other, set(qplan[2:]))
# Should use the secondary policy
for i in range(4):
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan), set(hosts))
def test_wrap_dc_aware(self):
cluster = Mock(spec=Cluster)
cluster.metadata = Mock(spec=Metadata)
hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
for host in hosts:
host.set_up()
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
def get_replicas(keyspace, packed_key):
index = struct.unpack('>i', packed_key)[0]
# return one node from each DC
if index % 2 == 0:
return [hosts[0], hosts[2]]
else:
return [hosts[1], hosts[3]]
cluster.metadata.get_replicas.side_effect = get_replicas
policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
policy.populate(cluster, hosts)
for i in range(4):
query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
qplan = list(policy.make_query_plan(None, query))
replicas = get_replicas(None, struct.pack('>i', i))
# first should be the only local replica
self.assertIn(qplan[0], replicas)
self.assertEqual(qplan[0].datacenter, "dc1")
# then the local non-replica
self.assertNotIn(qplan[1], replicas)
self.assertEqual(qplan[1].datacenter, "dc1")
# then one of the remotes (used_hosts_per_remote_dc is 1, so we
# shouldn't see two remotes)
self.assertEqual(qplan[2].datacenter, "dc2")
self.assertEqual(3, len(qplan))
class FakeCluster:
def __init__(self):
self.metadata = Mock(spec=Metadata)
def test_get_distance(self):
"""
Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
Except a FakeCluster is needed for the metadata variable and
policy.child_policy is needed to change child policy settings
"""
policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
host = Host("ip1", SimpleConvictionPolicy)
host.set_location_info("dc1", "rack1")
policy.populate(self.FakeCluster(), [host])
self.assertEqual(policy.distance(host), HostDistance.LOCAL)
# used_hosts_per_remote_dc is set to 0, so ignore it
remote_host = Host("ip2", SimpleConvictionPolicy)
remote_host.set_location_info("dc2", "rack1")
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# dc2 isn't registered in the policy's live_hosts dict
policy._child_policy.used_hosts_per_remote_dc = 1
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# make sure the policy has both dcs registered
policy.populate(self.FakeCluster(), [host, remote_host])
self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
# since used_hosts_per_remote_dc is set to 1, only the first
# remote host in dc2 will be REMOTE, the rest are IGNORED
second_remote_host = Host("ip3", SimpleConvictionPolicy)
second_remote_host.set_location_info("dc2", "rack1")
policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_status_updates(self):
"""
Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
"""
hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
policy.populate(self.FakeCluster(), hosts)
policy.on_down(hosts[0])
policy.on_remove(hosts[2])
new_local_host = Host(4, SimpleConvictionPolicy)
new_local_host.set_location_info("dc1", "rack1")
policy.on_up(new_local_host)
new_remote_host = Host(5, SimpleConvictionPolicy)
new_remote_host.set_location_info("dc9000", "rack1")
policy.on_add(new_remote_host)
# we now have two local hosts and two remote hosts in separate dcs
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
# since we have hosts in dc9000, the distance shouldn't be IGNORED
self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
policy.on_down(new_local_host)
policy.on_down(hosts[1])
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
policy.on_down(new_remote_host)
policy.on_down(hosts[3])
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
def test_statement_keyspace(self):
hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
for host in hosts:
host.set_up()
cluster = Mock(spec=Cluster)
cluster.metadata = Mock(spec=Metadata)
replicas = hosts[2:]
cluster.metadata.get_replicas.return_value = replicas
child_policy = Mock()
child_policy.make_query_plan.return_value = hosts
child_policy.distance.return_value = HostDistance.LOCAL
policy = TokenAwarePolicy(child_policy)
policy.populate(cluster, hosts)
# no keyspace, child policy is called
keyspace = None
routing_key = 'routing_key'
query = Statement(routing_key=routing_key)
qplan = list(policy.make_query_plan(keyspace, query))
self.assertEqual(hosts, qplan)
self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
child_policy.make_query_plan.assert_called_once_with(keyspace, query)
# working keyspace, no statement
cluster.metadata.get_replicas.reset_mock()
keyspace = 'working_keyspace'
routing_key = 'routing_key'
query = Statement(routing_key=routing_key)
qplan = list(policy.make_query_plan(keyspace, query))
self.assertEqual(replicas + hosts[:2], qplan)
cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key)
# statement keyspace, no working
cluster.metadata.get_replicas.reset_mock()
working_keyspace = None
statement_keyspace = 'statement_keyspace'
routing_key = 'routing_key'
query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
qplan = list(policy.make_query_plan(working_keyspace, query))
self.assertEqual(replicas + hosts[:2], qplan)
cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)
# both keyspaces set, statement keyspace used for routing
cluster.metadata.get_replicas.reset_mock()
working_keyspace = 'working_keyspace'
statement_keyspace = 'statement_keyspace'
routing_key = 'routing_key'
query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
qplan = list(policy.make_query_plan(working_keyspace, query))
self.assertEqual(replicas + hosts[:2], qplan)
cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)
class ConvictionPolicyTest(unittest.TestCase):
def test_not_implemented(self):
"""
Code coverage for interface-style base class
"""
conviction_policy = ConvictionPolicy(1)
self.assertRaises(NotImplementedError, conviction_policy.add_failure, 1)
self.assertRaises(NotImplementedError, conviction_policy.reset)
class SimpleConvictionPolicyTest(unittest.TestCase):
def test_basic_responses(self):
"""
Code coverage for SimpleConvictionPolicy
"""
conviction_policy = SimpleConvictionPolicy(1)
self.assertEqual(conviction_policy.add_failure(1), True)
self.assertEqual(conviction_policy.reset(), None)
class ReconnectionPolicyTest(unittest.TestCase):
def test_basic_responses(self):
"""
Code coverage for interface-style base class
"""
policy = ReconnectionPolicy()
self.assertRaises(NotImplementedError, policy.new_schedule)
class ConstantReconnectionPolicyTest(unittest.TestCase):
def test_bad_vals(self):
"""
Test initialization values
"""
self.assertRaises(ValueError, ConstantReconnectionPolicy, -1, 0)
def test_schedule(self):
"""
Test ConstantReconnectionPolicy schedule
"""
delay = 2
max_attempts = 100
policy = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
schedule = list(policy.new_schedule())
self.assertEqual(len(schedule), max_attempts)
        for scheduled_delay in schedule:
            self.assertEqual(scheduled_delay, delay)
def test_schedule_negative_max_attempts(self):
"""
Test how negative max_attempts are handled
"""
delay = 2
max_attempts = -100
try:
ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
self.fail('max_attempts should throw ValueError when negative')
except ValueError:
pass
class ExponentialReconnectionPolicyTest(unittest.TestCase):
def test_bad_vals(self):
self.assertRaises(ValueError, ExponentialReconnectionPolicy, -1, 0)
self.assertRaises(ValueError, ExponentialReconnectionPolicy, 0, -1)
self.assertRaises(ValueError, ExponentialReconnectionPolicy, 9000, 1)
def test_schedule(self):
policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100)
schedule = list(policy.new_schedule())
self.assertEqual(len(schedule), 64)
for i, delay in enumerate(schedule):
if i == 0:
self.assertEqual(delay, 2)
elif i < 6:
self.assertEqual(delay, schedule[i - 1] * 2)
else:
self.assertEqual(delay, 100)
ONE = ConsistencyLevel.ONE
class RetryPolicyTest(unittest.TestCase):
def test_read_timeout(self):
policy = RetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=1, received_responses=2,
data_retrieved=True, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we didn't get enough responses, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=1,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we got enough responses, but also got a data response, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# we got enough responses but no data response, so retry
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=2,
data_retrieved=False, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ONE)
def test_write_timeout(self):
policy = RetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.SIMPLE,
required_responses=1, received_responses=2, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if it's not a BATCH_LOG write, don't retry it
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.SIMPLE,
required_responses=1, received_responses=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# retry BATCH_LOG writes regardless of received responses
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
required_responses=10000, received_responses=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ONE)
def test_unavailable(self):
"""
Use the same tests for test_write_timeout, but ensure they only RETHROW
"""
policy = RetryPolicy()
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=1, alive_replicas=2, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=1, alive_replicas=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=10000, alive_replicas=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
class FallthroughRetryPolicyTest(unittest.TestCase):
"""
Use the same tests for test_write_timeout, but ensure they only RETHROW
"""
def test_read_timeout(self):
policy = FallthroughRetryPolicy()
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=1, received_responses=2,
data_retrieved=True, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=1,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=2,
data_retrieved=False, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
def test_write_timeout(self):
policy = FallthroughRetryPolicy()
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.SIMPLE,
required_responses=1, received_responses=2, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.SIMPLE,
required_responses=1, received_responses=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
required_responses=10000, received_responses=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
def test_unavailable(self):
policy = FallthroughRetryPolicy()
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=1, alive_replicas=2, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=1, alive_replicas=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE,
required_replicas=10000, alive_replicas=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
class DowngradingConsistencyRetryPolicyTest(unittest.TestCase):
def test_read_timeout(self):
policy = DowngradingConsistencyRetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=1, received_responses=2,
data_retrieved=True, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we didn't get enough responses, retry at a lower consistency
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=4, received_responses=3,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.THREE)
# if we didn't get enough responses, retry at a lower consistency
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=3, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.TWO)
# retry consistency level goes down based on the # of recv'd responses
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=3, received_responses=1,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.ONE)
# if we got no responses, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=3, received_responses=0,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we got enough response but no data, retry
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=3, received_responses=3,
data_retrieved=False, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ONE)
# if we got enough responses, but also got a data response, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency=ONE, required_responses=2, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
def test_write_timeout(self):
policy = DowngradingConsistencyRetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.SIMPLE,
required_responses=1, received_responses=2, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# ignore failures on these types of writes
for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=write_type,
required_responses=1, received_responses=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.IGNORE)
# downgrade consistency level on unlogged batch writes
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.UNLOGGED_BATCH,
required_responses=3, received_responses=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.ONE)
# retry batch log writes at the same consistency level
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
required_responses=3, received_responses=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ONE)
# timeout on an unknown write_type
retry, consistency = policy.on_write_timeout(
query=None, consistency=ONE, write_type=None,
required_responses=1, received_responses=2, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
def test_unavailable(self):
policy = DowngradingConsistencyRetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# downgrade consistency on unavailable exceptions
retry, consistency = policy.on_unavailable(
query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.ONE)
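# Hedged configuration sketch (not part of the test suite; illustrative only, and the
# exact Cluster keyword may vary by driver version): the policies exercised above are
# normally wired up when constructing the Cluster, e.g.
#
#     from cassandra.cluster import Cluster
#     from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy
#
#     cluster = Cluster(
#         contact_points=["127.0.0.1"],
#         load_balancing_policy=TokenAwarePolicy(
#             DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)),
#     )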
|
red2ws.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import redis
import threading
import time
import logging
import logging.handlers
import argparse
import traceback
import sys
import os
import warnings
warnings.filterwarnings('ignore')
try:
import simplejson as json
except ImportError:
import json
try:
import asyncio
except ImportError:
pass
class nsCaptureException(object):
iType=""
@property
def Type(self):return str(self.iType)
@Type.setter
def Type(self,aValue):self.iType=aValue
iValue=""
@property
def Value(self):return str(self.iValue)
@Value.setter
def Value(self,aValue):self.iValue=aValue
iTrace=""
@property
def Trace(self):return self.iTrace
@property
def TraceStr(self):return "".join(traceback.format_tb(self.iTrace)).replace("\n","").replace(" "," ")
@Trace.setter
def Trace(self,aValue):self.iTrace=aValue
def __init__(self,aInfo):self.Type, self.Value, self.Trace = aInfo
    def __str__(self):return "Error:" + self.Value + " Type:" + self.Type + " TraceBack:" + self.TraceStr
def Bytes2Str(aValue):
iRet=aValue
try:
if isinstance(aValue,bytes):
iRet=aValue.decode('utf-8')
else:
iRet=str(aValue)
except:
pass
return iRet
def IsPython3():return sys.version_info[0]>2
PYTHON3=IsPython3()
if PYTHON3:
    # Work around Tornado errors when write_message is called from another thread
import tornado.platform.asyncio
import asyncio
asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy())
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
class WSHandler(tornado.websocket.WebSocketHandler):
@property
def Th(self):return self._Th
@Th.setter
def Th(self,aValue):self._Th=aValue
@property
def Parent(self):return self.application.Server
@property
def RemoteIp(self):return self.request.remote_ip
@property
def Key(self):return self._Key
@Key.setter
def Key(self,aValue):
self._Key=aValue
@property
def Logger(self):
if self._Logger==None:self._Logger=self.Parent.Logger
return self._Logger
@Logger.setter
def Logger(self,aValue):self._Logger=aValue
@property
def Debug(self):
if self._Debug==None:self._Debug=self.Parent.Debug
return self._Debug
@Debug.setter
def Debug(self,aValue):self._Debug=aValue
@property
def Running(self):return self._Running
@Running.setter
def Running(self,aValue):self._Running=aValue
@property
def WebSockets(self):
if self._WebSockets==None:self._WebSockets=self.Parent.WebSockets
return self._WebSockets
@WebSockets.setter
def WebSockets(self,aValue):self._WebSockets=aValue
@property
def Redis(self):
if self._Redis==None:self._Redis=self.Parent.Redis
return self._Redis
@Redis.setter
def Redis(self,aValue):self._Redis=aValue
@property
def RedisWriter(self):
if self._RedisWriter==None:self._RedisWriter=self.Parent.RedisWriter
return self._RedisWriter
@RedisWriter.setter
def RedisWriter(self,aValue):self._RedisWriter=aValue
@property
def RedisSubscription(self):
if self._RedisSubscription==None:
self._RedisSubscription=self.Redis.pubsub()
if self.Key[-1]!="*":
self._RedisSubscription.subscribe(self.Key)
else:
self._RedisSubscription.psubscribe(self.Key)
return self._RedisSubscription
@RedisSubscription.setter
def RedisSubscription(self,aValue):
if self._RedisSubscription!=None:
self._RedisSubscription.unsubscribe(self.Key)
self._RedisSubscription=aValue
def DebugMsg(self,aMsg):
if self.Debug:self.Logger.debug(aMsg)
def StartListener(self):
if self.Running==False:
if self not in self.WebSockets:
self.WebSockets.append(self)
self.Running=True
self.Th=threading.Thread(target = self.Loop)
self.Th.daemon=True
self.Th.start()
def StopListener(self):
if self.Running:
if self in self.WebSockets:
self.RedisSubscription=None
self.close()
self.WebSockets.remove(self)
self.Running=False
self.Th=None
def Loop(self):
iMsg=None
self.DebugMsg("Start Loop")
while self.Running:
time.sleep(0.001)
try:
iMsg=self.RedisSubscription.get_message(timeout=1)
if iMsg:
iChannel=Bytes2Str(iMsg['channel'])
iType=Bytes2Str(iMsg['type'])
iMatch=False
if self.Key[-1]=="*":
iMatch =self.Key[:-1] in iChannel
else:
iMatch=iChannel==self.Key
if iMatch:self.Write(json.dumps(iMsg))
if self.Running==False:break
except redis.ConnectionError as E:
iError=str(E) + ":" + " Listener " + str(nsCaptureException(sys.exc_info())) + " (" + str(self.Key) + ":" + str(iMsg) + ")"
self.Logger.error(iError)
except Exception as E:
iError = "red2ws.Loop(): " + str(E) + ":" + str(nsCaptureException(sys.exc_info())) + " (" + str(iMsg) + ")"
self.Logger.error(iError )
self.DebugMsg("Stop Loop")
def open(self,aUrl):
self._Contador=0
self.Key=aUrl
self.StartListener()
self.DebugMsg("OPEN: " + aUrl +" channel " + str(self.Key) + " from " + self.RemoteIp)
time.sleep(.5)
def on_message(self, message):
self.RedisWriter.publish(self.Key,message)
def check_origin(self, origin):return True
def on_close(self,aCode = None, aReason = None):
self.DebugMsg("DEBUG: CLOSE WS (" + str(self.Key) + ") from " + self.RemoteIp + " " + str(aCode) + ":" + str(aReason))
self.StopListener()
def Write(self,aMsg):
try:
iObj=aMsg
self._Contador=self._Contador+1
try:
iData=iObj
if PYTHON3:iData=iData.encode('utf-8')
self.write_message(iData)
except tornado.websocket.WebSocketClosedError as E:
pass
except Exception as e:
pass
            if self.Debug and self._Contador % 100 ==0:self.DebugMsg("DEBUG: PUBLISH WS (" + str(self.Key) + ") to " + self.RemoteIp + ": " + str(self._Contador))
except Exception as E:
iError = "WSHandler.Write(): " + str(E) + ":" + str(nsCaptureException(sys.exc_info())) + " (" + self.Key + ":" + str(aMsg) + ")"
self.Parent.Logger.error(iError )
def __init__(self, *args, **kwargs):
self._RedisSubscription=None
self._Redis=None
self._RedisWriter=None
self._Th=None
self._WebSockets=None
self._Debug=None
self._Contador=0
self._Running=False
self._Logger=None
self._Parent=None
self._Key=None
self._Parent = kwargs.pop('aParent')
super(WSHandler, self).__init__(*args, **kwargs)
class red2ws(object):
@property
def SslCertFile(self):return self._SslCertFile
@SslCertFile.setter
def SslCertFile(self,aValue):self._SslCertFile=aValue
@property
def SslKeyFile(self):return self._SslKeyFile
@SslKeyFile.setter
def SslKeyFile(self,aValue):self._SslKeyFile=aValue
@property
def Debug(self):return self._Debug
@Debug.setter
def Debug(self,aValue):self._Debug=aValue
@property
def LogFileName(self):
if self._LogFileName==None:self._LogFileName=__file__+ ".log"
return self._LogFileName
@property
def Logger(self):
if self._Logger==None:
self._Logger=logging.getLogger(os.path.basename(self.LogFileName).split(".")[0])
self._Logger.setLevel(logging.DEBUG)
iHandler=logging.handlers.RotatingFileHandler(self.LogFileName, maxBytes=10*1024*1024, backupCount=5)
iFormater=logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
iHandler.setFormatter(iFormater)
self._Logger.addHandler(iHandler)
iHandler = logging.StreamHandler()
iHandler.setFormatter(iFormater)
self._Logger.addHandler(iHandler)
return self._Logger
@property
def WebSockets(self):
if self._WebSockets==None:self._WebSockets=self.Parent.WebSockets
return self._WebSockets
@WebSockets.setter
def WebSockets(self,aValue):self._WebSockets=aValue
@property
def LocalIp(self):
if self._LocalIp==None:self._LocalIp=socket.gethostbyname(socket.gethostname())
return self._LocalIp
@property
def Url(self):
        if self._Url==None:self._Url=r'/(.+)'
return self._Url
@Url.setter
def Url(self,aValue):self._Url=aValue
@property
def App(self):
if self._App==None:
self._App=tornado.web.Application([(self.Url, WSHandler,{'aParent':self})])
self._App.Server=self
return self._App
@App.setter
def App(self,aValue):self._App=aValue
@property
def Client(self):
if self._Client==None:
iSsl=None
if os.path.exists(self.SslCertFile) and os.path.exists(self.SslKeyFile):iSsl={"certfile": self.SslCertFile,"keyfile": self.SslKeyFile}
self._Client=tornado.httpserver.HTTPServer(self.App,ssl_options=iSsl)
self._Client.listen(self.Port,address=self.Host)
return self._Client
@Client.setter
def Client(self,aValue):
self._Client=aValue
@property
def Port(self):return self._Port
@Port.setter
def Port(self,aValue):self._Port=aValue
@property
def Host(self):return self._Host
@Host.setter
def Host(self,aValue):self._Host=aValue
def StartWs(self):
iClient=self.Client
tornado.ioloop.IOLoop.instance().start()
def StopWs(self):
tornado.ioloop.IOLoop.instance().stop()
self.Client=None
def Start(self):
self.StartWs()
def Stop(self):
self.StopWs()
def Reconnect(self,aMax=10):
iRet=False
iCont=1
while 1:
try:
iCont=iCont+1
self.Redis.ping()
except redis.ConnectionError as E:
if iCont>aMax:break
time.sleep(aMax)
else:
self.Redis=None
iRet=True
                    break
        return iRet
@property
def REDISIp(self):return self._REDISIp
@REDISIp.setter
def REDISIp(self,aValue):self._REDISIp=aValue
@property
def REDISPort(self):return self._REDISPort
@REDISPort.setter
def REDISPort(self,aValue):self._REDISPort=aValue
@property
def REDISPassword(self):return self._REDISPassword
@REDISPassword.setter
def REDISPassword(self,aValue):self._REDISPassword=aValue
@property
def REDISCaCerts(self):return self._REDISCaCerts
@REDISCaCerts.setter
def REDISCaCerts(self,aValue):self._REDISCaCerts=aValue
@property
def REDISDb(self):return self._REDISDb
@REDISDb.setter
def REDISDb(self,aValue):self._REDISDb=aValue
@property
def Redis(self):
if self._Redis==None:
try:
iPass=None
if self.REDISPassword!="":iPass=self.REDISPassword
iSsl=False
iCa=None
if os.path.exists(self.REDISCaCerts):
iSsl=True
iCa=self.REDISCaCerts
                self._Redis=redis.StrictRedis(host=self.REDISIp, port=self.REDISPort,password=iPass,ssl=iSsl,ssl_ca_certs=iCa,db=self.REDISDb)
except Exception as E:
iError = "red2ws.Redis(): " + str(E) + ":" + str(nsCaptureException(sys.exc_info())) + " (" + self.REDISIp + ":" + str(self.REDISPort) + ")"
self.Logger.error(iError )
self._Redis=None
return self._Redis
@property
def RedisWriter(self):
if self._RedisWriter==None:
try:
iPass=None
if self.REDISPassword!="":iPass=self.REDISPassword
iSsl=False
iCa=None
if os.path.exists(self.REDISCaCerts):
iSsl=True
iCa=self.REDISCaCerts
                self._RedisWriter=redis.StrictRedis(host=self.REDISIp, port=self.REDISPort,password=iPass,ssl=iSsl,ssl_ca_certs=iCa,db=self.REDISDb)
except Exception as E:
iError = "red2ws.Redis(): " + str(E) + ":" + str(nsCaptureException(sys.exc_info())) + " (" + self.REDISIp + ":" + str(self.REDISPort) + ")"
self.Logger.error(iError )
self._RedisWriter=None
return self._RedisWriter
@RedisWriter.setter
def RedisWriter(self,aValue):self._RedisWriter=aValue
def __init__(self,aHost="0.0.0.0",aPort=8080,aUrl=r'/(.+)',aRedisIp="127.0.0.1",aRedisPort=7001,aSslCertFile="",aSslKeyFile="",aRedisPassword="",aREDISCaCerts="",aRedisDb=0,aDebug=False):
self._App=None
self._LocalIp=None
self._Url=None
self._Client=None
self._ClientSubs=None
self._REDISPubSub=None
self._Logger=None
self._LogFileName=None
self._Debug=aDebug
self._WebSockets=[]
self._Th=None
self._Port=aPort
self._Host=aHost
self._REDISPort=aRedisPort
self._REDISPassword=aRedisPassword
self._REDISCaCerts=aREDISCaCerts
self._Url=aUrl
self._REDISIp=aRedisIp
self._REDISPort=aRedisPort
self._Redis=None
self._RedisWriter=None
self._SslCertFile=aSslCertFile
self._SslKeyFile=aSslKeyFile
self._REDISDb=aRedisDb
def GetParams():
iParser = argparse.ArgumentParser("WebSocket Server Redis")
iParser.add_argument("--websockport",type=int,help="WebSocket Port",default=8080)
iParser.add_argument("--websockhost",type=str,help="WebSocket Host",default="0.0.0.0")
iParser.add_argument("--websockurl",type=str,help="WebSocket Url",default=r'/(.+)')
iParser.add_argument("--websocksslcertfile",type=str,help="ssl cert file for a wss:// protocol",default="")
iParser.add_argument("--websocksslkeyfile",type=str,help="ssl key file for a wss:// protocol",default="")
iParser.add_argument("--redishost",type=str,help="Redis Host",default="127.0.0.1")
iParser.add_argument("--redisport",type=int,help="Redis Port",default=6379)
iParser.add_argument("--redisdb",type=int,help="Redis db",default=0)
iParser.add_argument("--redispassword",type=str,help="Redis password",default="")
iParser.add_argument("--rediscacerts",type=str,help="Redis ca certs for ssl connections",default="")
iParser.add_argument("--debug",type=int,help="Debug",default=0)
return iParser.parse_args()
def Main():
iObj=None
try:
iParams=GetParams()
iObj=red2ws(iParams.websockhost,iParams.websockport,iParams.websockurl,iParams.redishost,iParams.redisport,iParams.websocksslcertfile,iParams.websocksslkeyfile,iParams.redispassword,iParams.rediscacerts,iParams.redisdb,iParams.debug==1)
iObj.Start()
except KeyboardInterrupt as Ke:
pass
except Exception as E:
iError = "MAIN(): " + str(E) + ":" + str(nsCaptureException(sys.exc_info()))
iError=iError.replace("\n"," ")
iObj.Logger.error(iError)
finally:
if iObj!=None:
iObj.Stop()
if __name__ == "__main__":
Main()
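# Hedged usage sketch (illustrative only; ports, hosts and channel names below are
# assumptions, not part of the original script). Start the bridge with, e.g.:
#
#   python red2ws.py --websockhost 0.0.0.0 --websockport 8080 \
#       --redishost 127.0.0.1 --redisport 6379 --debug 1
#
# A WebSocket client connecting to ws://<host>:8080/mychannel is then subscribed to
# the Redis channel "mychannel"; any message it sends is published back to the same
# channel, and a trailing "*" in the URL (e.g. /sensors.*) switches to psubscribe
# for pattern matching.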
|
data_source.py
|
import time
try:
    import Queue
except ImportError:  # Python 3: the module was renamed to queue
    import queue as Queue
import threading
import logging
log = logging.getLogger(__name__)
def mpu6050_data_generator(dt, stopped):
from mpu6050 import mpu6050
sensor = mpu6050(0x68)
# try to poll the sensor until it configures properly
started_at = time.time()
last_err = None
while time.time() < started_at + 1:
try:
sensor.get_all_data()
except IOError as err:
last_err = err
time.sleep(0.02)
else:
break
else:
raise IOError('MPU6050 reading fails: {}'.format(str(last_err)))
while not stopped.isSet():
start = time.time()
accel = sensor.get_accel_data()
gyro = sensor.get_gyro_data()
temp = sensor.get_temp()
item = accel['x'], accel['y'], accel['z'], gyro['x'], gyro['y'], gyro['z'], temp
yield item
duration = time.time() - start
if dt > duration:
time.sleep(dt - duration)
def motiontracker_data_generator(mpu_generator, tracker, calibrate_n=0):
if calibrate_n > 0:
log.info('starting calibration, don\'t move the device...')
tracker.start_calibration()
for _ in range(calibrate_n):
item = next(mpu_generator)
tracker.add_data(*item[:-1]) # last item is temperature
tracker.finish_calibration()
log.info('calibration finished')
for item in mpu_generator:
tracker.add_data(*item[:-1]) # last item is temperature
yield item + tracker.angles + tracker.coordinates
class DataStreamer(object):
def __init__(self, generator, max_queue_size=1000, consumer_timeout=0.01):
self.max_queue_size = max_queue_size
self.consumer_timeout = consumer_timeout
self._consumers = {}
self._next_id = 1
self._lock = threading.Lock()
self._stopped = threading.Event()
self._generator = generator
self._thread = threading.Thread(target=self._run)
self._thread.setDaemon(True)
self._thread.start()
def add_consumer(self, function):
with self._lock:
if self._stopped.isSet():
return
queue = Queue.Queue(maxsize=self.max_queue_size)
consumer_thread = threading.Thread(target=self._consumer_run,
args=(queue, function))
consumer_thread.setDaemon(True)
consumer_thread.start()
new_id = self._next_id
self._next_id += 1
self._consumers[new_id] = (queue, function, consumer_thread)
log.info('added new consumer: %r -> %s', function, new_id)
return new_id
def request_stop(self):
self._stopped.set()
def remove_consumer(self, consumer_id):
with self._lock:
q, f, t = self._consumers.pop(consumer_id)
try:
while True:
q.get_nowait()
except Queue.Empty:
q.put(None)
blocking = threading.currentThread().ident != t.ident
log.info('removing consumer: %r -> %s %s',
f, consumer_id, '(blocking)' if blocking else '')
if blocking:
t.join()
def wait_for_end(self):
self._thread.join()
with self._lock:
consumers = list(self._consumers.values())
for q, f, t in consumers:
t.join()
def _run(self):
try:
for item in self._generator:
if self._stopped.isSet():
break
with self._lock:
consumers = list(self._consumers.values())
for q, _, _ in consumers:
try:
q.put(item, timeout=self.consumer_timeout)
except Queue.Full:
pass
except:
log.exception('data generator got an error')
log.info('data generator finished')
with self._lock:
for q, _, _ in self._consumers.values():
q.put(None)
for _, _, t in self._consumers.values():
t.join()
def _consumer_run(self, queue, function):
while not self._stopped.isSet():
item = queue.get()
if item is None:
break
function(*item)
log.info('stopped consumer %r', function)
def dump_to_file(generator, filename, n_entries):
log.info("start dumping to file %s from %r", filename, generator)
with open(filename, 'w') as f:
for _, item in zip(range(n_entries), generator):
f.write(' '.join(map(str, item)) + '\n')
yield item
log.info("DONE dumping to file %s from %r", filename, generator)
for item in generator:
yield item
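# Hedged usage sketch (not from the original module): wires the pieces above together,
# assuming an MPU6050 is wired at the default 0x68 address and the `mpu6050` package is
# installed. Values such as dt=0.01 and the 5-second run are illustrative.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    stop_event = threading.Event()
    data_gen = mpu6050_data_generator(dt=0.01, stopped=stop_event)
    streamer = DataStreamer(data_gen)
    # Log every sample; a real consumer might plot or persist the values instead.
    streamer.add_consumer(lambda ax, ay, az, gx, gy, gz, temp:
                          log.info('accel=(%s, %s, %s) temp=%s', ax, ay, az, temp))
    try:
        time.sleep(5)  # stream for a few seconds
    finally:
        stop_event.set()        # stop the sensor generator
        streamer.wait_for_end() # wait for producer and consumer threads to finish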
|
test_sequence.py
|
from datetime import datetime
import logging
from threading import Thread
import traceback
from typing import List
from mats.test import Test
from mats.archiving import ArchiveManager
Sequence = List[Test]
class TestSequence:
"""
The TestSequence will "knit" the sequence together by taking the test \
objects and appropriately passing them through the automated testing \
process.
:param sequence: a list of Tests
:param archive_manager: an instance of ``ArchiveManager`` which will \
contain the path and format-specific information
:param auto_start: True if test is to be automatically started
:param auto_run: True if the test is to be continually executed
:param callback: function to call on each test sequence completion; \
callback will be required to accept one parameter, which is the \
dictionary of values collected over that test iteration
:param setup: function to call before the test sequence
:param teardown: function to call after the test sequence is complete, \
even if there was a problem; common to have safety issues addressed here
:param on_close: function to call when the functionality is complete; \
for instance, when a GUI closes, test hardware may need to be de-allocated
:param loglevel: the logging level
"""
def __init__(self, sequence: Sequence,
archive_manager: ArchiveManager = None,
auto_start=False, auto_run=False, callback: callable = None,
setup: callable = None, teardown: callable = None,
on_close: callable = None,
loglevel=logging.INFO):
self._logger = logging.getLogger(self.__class__.__name__)
self._logger.setLevel(loglevel)
        # Protection in case one or more entries in the sequence were supplied
        # as Test classes rather than instances; instantiate them here.
for i, test in enumerate(sequence):
if not isinstance(test, Test):
sequence[i] = test()
if not self.__validate_sequence(sequence):
raise ValueError('test monikers are not uniquely identified')
self._sequence = sequence
self._archive_manager = archive_manager
self._auto_run = auto_run
self._callback = callback
self._setup = setup
self._teardown = teardown
self._on_close = on_close
self.in_progress = False
self._aborted = False
self._test_data = {
'datetime': {'value': str(datetime.now())},
'pass': {'value': True},
'failed': {'value': []}
}
self.current_test = None
self._current_test_number = 0
if auto_start:
self._logger.info('"auto_start" flag is set, '
'beginning test sequence')
self.start()
def __getitem__(self, test_element: (str, Test)):
test = None
if isinstance(test_element, str):
for t in self._sequence:
if t.moniker == test_element:
test = t
break
elif isinstance(test_element, Test):
for t in self._sequence:
if t == test_element:
test = t
break
else:
raise TypeError(
f'"{test_element}" is of type "{type(test_element)}"; '
f'expected "str" or "Test"')
if test is not None:
return test
raise KeyError(f'test "{test_element}" does not appear to be defined')
@property
def tests(self):
"""
Returns instances of all tests contained within the ``TestSequence``
:return: all tests as a list
"""
return [test for test in self._sequence]
@property
def test_names(self):
"""
Returns the names of the tests contained within the ``TestSequence``
:return: the names of the tests as a list
"""
return [test.moniker for test in self._sequence]
@property
def ready(self):
"""
Returns True if the test sequence is ready for another go at it, \
False if not
:return: True or False
"""
return not self.in_progress
@property
def is_passing(self):
"""
Returns True if the test sequence is currently passing, else False
:return: True or False
"""
return self._test_data['pass'].get('value')
@property
def is_aborted(self):
"""
Returns True if the test sequence has been aborted, else False
:return: True or False
"""
return self._aborted
@property
def failed_tests(self):
"""
Return a list of the tests which failed.
:return: list of tests that failed
"""
return self._test_data['failed'].get('value')
@property
def progress(self):
"""
Returns a tuple containing (current_test_number, total_tests) in \
order to give an indication of the progress of the test sequence.
:return: tuple containing (current_test_number, total_tests)
"""
return (self._current_test_number,
len([test.moniker for test in self._sequence]))
def close(self):
"""
Allows higher level code to call the close functionality.
"""
if self._on_close is not None:
self._on_close()
def __validate_sequence(self, sequence: List[Test]):
moniker_set = set([t.moniker for t in sequence])
if len(moniker_set) != len(sequence):
return False
return True
def abort(self):
"""
Abort the current test sequence.
:return: None
"""
self._aborted = True
def start(self):
"""
Start a test sequence. Will only work if a test sequence isn't \
already in progress.
:return: None
"""
if self.in_progress:
self._logger.warning('cannot begin another test when test is '
'currently in progress')
return
self.in_progress = True
thread = Thread(target=self._run_test)
thread.start()
def _run_test(self):
"""
Runs one instance of the test sequence. Executes continually if the \
auto_run flag was set on initialization.
:return: None
"""
self._logger.info('-' * 80)
self._aborted = False
self._test_data = {
'datetime': {'value': str(datetime.now())},
'pass': {'value': True},
'failed': {'value': []}
}
self._current_test_number = 0
for test in self._sequence:
test.reset()
if self._setup is not None:
self._setup()
# begin the test sequence
for i, test in enumerate(self._sequence):
self._current_test_number = i
if self._aborted:
break
self.current_test = test.moniker
try:
test._setup(is_passing=self.is_passing)
except Exception as e:
self._logger.critical(f'critical error during '
f'setup of "{test}": {e}')
self._logger.critical(str(traceback.format_exc()))
self.abort()
test.fail()
if test.aborted:
self.abort()
break
try:
test_result = test._execute(is_passing=self.is_passing)
except Exception as e:
test_result = None
self._logger.critical(f'critical error during '
f'execution of "{test}": {e}')
self._logger.critical(str(traceback.format_exc()))
self.abort()
test.fail()
if test.aborted:
self.abort()
test.fail()
break
if test_result is not None:
self._test_data[test.moniker] = {'value': test_result}
criteria = test.criteria
if criteria is not None:
self._test_data[test.moniker]['criteria'] = criteria
try:
test._teardown(is_passing=self.is_passing)
except Exception as e:
self._logger.critical(f'critical error during '
f'teardown of "{test}": {e}')
self._logger.critical(str(traceback.format_exc()))
self.abort()
test.fail()
if test.aborted:
self.abort()
break
if not test._test_is_passing:
self._test_data['pass']['value'] = False
self._test_data['failed']['value'].append(test.moniker)
# if any data was specifically stored within a test,
# then retrieve it and store it within the sequence
# test data
for k, v in test.saved_data.items():
self._test_data[k]['value'] = v
if not self._aborted and self._archive_manager is not None:
self._archive_manager.save(self._test_data)
self._logger.info('test sequence complete')
self._logger.debug(f'test results: {self._test_data}')
if self._teardown is not None:
self._teardown()
if self._callback is not None:
self._logger.info(f'executing user-supplied callback function '
f'"{self._callback}"')
self._callback(self._test_data)
self.in_progress = False
if self._auto_run:
if not self._aborted:
self._logger.info('"auto_run" flag is set, looping')
thread = Thread(target=self._run_test)
thread.start()
else:
self._logger.warning('"auto_run" flag is set, but the test '
'sequence was prematurely aborted')
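# Hedged usage sketch (not part of the original module): CommsTest and PressureTest
# are hypothetical mats.Test subclasses, and the callback is illustrative.
#
#     sequence = TestSequence(
#         sequence=[CommsTest(), PressureTest()],
#         auto_start=True,
#         callback=lambda data: print(data),
#     )
#
# With auto_start=True the sequence begins immediately in a background thread;
# progress, is_passing and failed_tests can then be polled while it runs.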
|
trainer.py
|
# coding: utf-8
###
# @file trainer.py
# @author Arsany Guirguis <arsany.guirguis@epfl.ch>
#
# @section LICENSE
#
# Copyright (c) 2020 Arsany Guirguis.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# @section DESCRIPTION
#
# ByzSGD implementation using Garfiled++ library, which is based on RPC
# this file can be used by both the parameter server and the workers
# original paper: https://dl.acm.org/doi/pdf/10.1145/3382734.3405695
# Few important notes about this implementation:
# 1) The same GAR is used to aggregate both gradients and models
# 2) We assume the asynchronous algorithm here
# 3) Workers do not pull models from multiple parameter servers; the latter controls the learning process. In turn, the gather step among PSes is executed in each and every step.
###
#!/usr/bin/env python
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed.rpc as rpc
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_async, remote
from time import time
import argparse
import sys
import json
import threading
import garfieldpp
from garfieldpp.worker import Worker
from garfieldpp.byzWorker import ByzWorker
from garfieldpp.server import Server
from garfieldpp.tools import get_bytes_com,convert_to_gbit, adjust_learning_rate
import aggregators
CIFAR_NUM_SAMPLES = 50000
#First, parse the inputs
parser = argparse.ArgumentParser(description="ByzSGD implementation using Garfield++ library", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--master",
type=str,
default="",
help="Master node in the deployment. This node takes rank 0, usually the first PS.")
parser.add_argument("--rank",
type=int,
default=0,
help="Rank of a process in a distributed setup.")
parser.add_argument("--dataset",
type=str,
default="mnist",
help="Dataset to be used, e.g., mnist, cifar10,...")
parser.add_argument("--batch",
type=int,
default=32,
help="Minibatch size to be employed by each worker.")
parser.add_argument("--num_ps",
type=int,
default=1,
help="Number of parameter servers in the deployment.")
parser.add_argument("--num_workers",
type=int,
default=1,
help="Number of workers in the deployment.")
parser.add_argument("--fw",
type=int,
default=0,
help="Number of declared Byzantine workers.")
parser.add_argument("--fps",
type=int,
default=0,
help="Number of declared Byzantine parameter servers.")
parser.add_argument("--model",
type=str,
default='convnet',
help="Model to be trained, e.g., convnet, cifarnet, resnet,...")
parser.add_argument("--loss",
type=str,
default='nll',
help="Loss function to optimize against.")
parser.add_argument("--optimizer",
type=str,
default='sgd',
help="Optimizer to use.")
parser.add_argument("--opt_args",
type=json.loads,
default={'lr':'0.1'},
help="Optimizer arguments; passed in dict format, e.g., '{\"lr\":\"0.1\"}'")
parser.add_argument("--num_iter",
type=int,
default=5000,
help="Number of training iterations to execute.")
parser.add_argument("--gar",
type=str,
default='average',
help="Aggregation rule for aggregating gradients.")
parser.add_argument('--acc_freq',
type=int,
default=100,
help="The frequency of computing accuracy while training.")
parser.add_argument('--bench',
type=bool,
default=False,
help="If True, time elapsed in each step is printed.")
parser.add_argument('--log',
type=bool,
default=False,
help="If True, accumulated loss at each iteration is printed.")
FLAGS = parser.parse_args(sys.argv[1:])
master = FLAGS.master
assert len(master) > 0
rank = FLAGS.rank
assert rank >= 0
num_ps = FLAGS.num_ps
assert num_ps >= 1
num_workers = FLAGS.num_workers
assert num_workers >= 1
world_size = num_workers + num_ps
fw = FLAGS.fw
assert fw*2 < num_workers
fps = FLAGS.fps
assert fps*2 < num_ps
dataset = FLAGS.dataset
assert len(dataset) > 0
batch = FLAGS.batch
assert batch >= 1
model = FLAGS.model
assert len(model) > 0
loss = FLAGS.loss
assert len(loss) > 0
optimizer = FLAGS.optimizer
assert len(optimizer) > 0
opt_args = FLAGS.opt_args
for k in opt_args:
opt_args[k] = float(opt_args[k])
assert opt_args['lr']
num_iter = FLAGS.num_iter
assert num_iter > 0
gar = FLAGS.gar
assert len(gar) > 0
acc_freq = FLAGS.acc_freq
assert(acc_freq > 10)
bench = FLAGS.bench
if bench:
from timeit import timeit
else:
timeit = None
log = FLAGS.log
#os.environ['CUDA_VISIBLE_DEVICES'] = str((rank%2))
print("**** SETUP AT NODE {} ***".format(rank))
print("Number of workers: ", num_workers)
print("Number of servers: ", num_ps)
print("Number of declared Byzantine workers: ", fw)
print("Number of declared Byzantine parameter servers: ", fps)
print("GAR: ", gar)
print("Dataset: ", dataset)
print("Model: ", model)
print("Batch size: ", batch)
print("Loss function: ", loss)
print("Optimizer: ", optimizer)
print("Optimizer Args", opt_args)
print("Benchmarking? ", bench)
print("Logging loss at each iteration?", log)
print("------------------------------------")
sys.stdout.flush()
lr = opt_args['lr']
#initiating the GAR
gar = aggregators.gars.get(gar)
assert gar is not None
os.environ['MASTER_ADDR'] = master
os.environ['MASTER_PORT'] = '29500'
torch.manual_seed(1234) #For reproducibility
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234) #For reproducibility
if bench:
torch.backends.cudnn.benchmark=True
#convention: low ranks are reserved for parameter servers
if rank < num_ps:
rpc.init_rpc('ps:{}'.format(rank), rank=rank, world_size=world_size)
#Initialize a parameter server and write the training loop
ps = Server(rank, world_size, num_workers,1, fw, fps, 'worker:', 'ps:', batch, model, dataset, optimizer, **opt_args)
scheduler = torch.optim.lr_scheduler.MultiStepLR(ps.optimizer, milestones=[150, 250, 350], gamma=0.1) #This line shows sophisticated stuff that can be done out of the Garfield++ library
start_time = time()
    iter_per_epoch = CIFAR_NUM_SAMPLES//(num_workers * batch)		#number of iterations that make up one epoch
print("One EPOCH consists of {} iterations".format(iter_per_epoch))
sys.stdout.flush()
for i in range(num_iter):
if i%(iter_per_epoch*30) == 0 and i!=0: #One hack for better convergence with Cifar10
lr*=0.1
adjust_learning_rate(ps.optimizer, lr)
#training loop goes here
def train_step():
if bench:
bytes_rec = get_bytes_com() #record number of bytes sent before the training step to work as a checkpoint
with torch.autograd.profiler.profile(enabled=bench) as prof:
gradients = ps.get_gradients(i, num_workers-fw) #get_gradients(iter_num, num_wait_wrk)
aggr_grad = gar(gradients=gradients, f=fw) #aggr_grad = gar.aggregate(gradients)
ps.update_model(aggr_grad)
#Gather step goes here...basically, get models from other servers, aggregate them, and write the result
models = ps.get_models(num_ps-fps)
aggr_models = gar(gradients=models,f=fps)
ps.write_model(aggr_models)
ps.model.to('cpu:0')
if bench:
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
bytes_train = get_bytes_com()
print("Consumed bandwidth in this iteration: {} Gbits".format(convert_to_gbit(bytes_train-bytes_rec)))
# print("Memory allocated to GPU {} Memory cached on GPU {}".format(torch.cuda.memory_allocated(0), torch.cuda.memory_cached(0)))
sys.stdout.flush()
if timeit is not None:
res = timeit(train_step,number=1)
print("Training step {} takes {} seconds".format(i,res))
sys.stdout.flush()
else:
train_step()
if i%iter_per_epoch == 0:
def test_step():
acc = ps.compute_accuracy()
num_epochs = i/iter_per_epoch
print("Epoch: {} Accuracy: {} Time: {}".format(num_epochs,acc,time()-start_time))
sys.stdout.flush()
if timeit is not None:
res = timeit(test_step,number=1)
print("Test step takes {} seconds".format(res))
else:
# test_step() #Though threading is a good idea, applying it here messes the use of CPU with GPU
# if model.startswith('resnet') and i!=0:
# scheduler.step()
threading.Thread(target=test_step).start()
else:
rpc.init_rpc('worker:{}'.format(rank-num_ps), rank=rank, world_size=world_size)
#initialize a worker here
Worker(rank, world_size, num_workers, batch, model, dataset, loss)
# ByzWorker(rank, world_size, num_workers, batch, model, dataset, loss, 'drop')
rpc.shutdown()
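# Hedged launch sketch (hostnames are illustrative; flag values follow the argparse
# defaults above): a deployment with one parameter server and two workers starts
# three processes, where low ranks are the parameter servers by convention, e.g.
#
#   python trainer.py --master ps0.example.org --rank 0 --num_ps 1 --num_workers 2 \
#       --dataset cifar10 --model convnet --batch 32 --gar average
#   python trainer.py --master ps0.example.org --rank 1 --num_ps 1 --num_workers 2 \
#       --dataset cifar10 --model convnet --batch 32 --gar average
#   python trainer.py --master ps0.example.org --rank 2 --num_ps 1 --num_workers 2 \
#       --dataset cifar10 --model convnet --batch 32 --gar average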
|
scantools.py
|
"""
拿到任务后按着插件执行顺序开始执行
modify by judy 2020/03/18
新增回馈 by judy 2020/04/08
暂时增加断点续扫的功能,目前只简单的增加以下
记录扫描到了哪个port
"""
from pathlib import Path
import re
import threading
import time
import traceback
import uuid
from queue import Empty, Queue
import IPy
from datacontract import EClientBusiness
from ..config_client import basic_client_config
clientbusiness = eval(basic_client_config.clientbusiness)
if (
EClientBusiness.ALL.value in clientbusiness
or EClientBusiness.IScanTask.value in clientbusiness
):
from geoiploc.geoiploc import GeoIPLoc
from datacontract import IscanTask, ECommandStatus
from idownclient.config_scanner import (
max_nmap_ip,
max_nscan_threads,
max_zscan_threads,
max_zgrab2_threads,
max_zscan_ipranges,
max_zscan_ip,
max_vulns_threads,
)
from .ipprocesstools import IpProcessTools
from .plugin.dbip.dbipmmdb import DbipMmdb
from .plugin.logicalbanner import LogicalGrabber
from .plugin.nmap.nmap import Nmap
from .plugin.zgrab2.zgrab2 import Zgrab2
from .plugin.zmap.zmap import Zmap
# from .plugin.masscan.masscan import Masscan
from .scanplugbase import ScanPlugBase
from ..clientdatafeedback.scoutdatafeedback import IP, PortInfo, GeoInfo
class ScanTools(ScanPlugBase):
def __init__(self, task: IscanTask):
ScanPlugBase.__init__(self, task)
self.task: IscanTask = task
self.nmap = Nmap()
self.zgrab2 = Zgrab2()
        self.zmap = Zmap()  # zmap and masscan do the same job; masscan may be tried here instead
        self.logicalgrabber = LogicalGrabber()
        # Added IP geolocation lookup, modify by judy 2020/03/31
        self.dbip = DbipMmdb()
        # Hosts are taken from cmd at init time; since two-letter country codes are
        # supported, a dedicated method handles fetching them
        self.re_iprang = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{2}")
        # Count of all /24 (class C) ranges, used to compute progress
        self.__c_ipranges_count = 0
        self.__all_scan_count = 0
        # Count of IP ranges that have already been scanned
        self.__has_scan_count = 0
        # Current scan progress, e.g. 98.99 %
        self.__progress: float = 0.00
        # Port queue, consumed by the multi-threaded zmap callers
        self.port_queue = Queue()
        # Dicts make deduplication easy, modify by judy 2020/08/06
        # For efficiency, single IPs and /24 ranges are handled separately
        self.hosts = {}
        self.host = {}
        # File lock; files with the same name may appear, so a lock is needed, modify by judy 20210203
        self._file_locker = threading.RLock()
        # zmap pending queue; defaults to the running state
        self.make_zmap_scan_queue_status = True
        # zmap processing queue
        self.zmap_queue = Queue()
        self.__zmap_scan_thread_state = {}
        # nmap processing queue
        self.nmap_queue = Queue()
        self.__nmap_scan_thread_state = {}
        self._nmap_tmp_dict_locker = threading.RLock()
        self._nmap_tmp = {}
        # zgrab2 processing queue
        self.zgrab2_queue = Queue()
        self.__zgrab2_scan_thread_state = {}
        # Vulnerability-scan requirement added 2020-09-17: match known vulns and scan for them
        self.vulns_queue = Queue()
        self.__vulns_scan_thread_state = {}
        self._vulns_list = self.task.cmd.stratagyscan.scan.vuls
        # Thread producing the final results
        self.output_res_queue = Queue()
        # There are two data sources: manually entered hosts, and IP ranges obtained
        # by querying a country/region. Country queries return far more ranges, so a
        # flag distinguishes the two. Defaults to manual data; when the data comes
        # from a country query the result is always a seres object.
        self.country_flag = False
        # Persist port-scan progress
        self.sp = Path("./scan_port_progress.txt")
def process_host(self):
"""
处理传入的需要扫描的host
:return:
"""
log = "开始处理IP段"
self._logger.debug("Start process ip ranges")
self._outprglog(log)
cmdhost: list = self.task.cmd.stratagyscan.scan.hosts
location: dict = self.task.cmd.stratagyscan.scan.location
# 以host的Ip段为主
if cmdhost is not None and len(cmdhost) > 0:
ip_ranges = cmdhost
else:
self.country_flag = True
# 国家的二字码查出来就实在是太多了
ip_ranges = self.get_country_iprange(location)
# modify by judy 2020/08/06优化的本地IP查询,然后优化了根据国家数据查询ip段过多和去重的问题
self.split_long_ip_ranges(ip_ranges)
self._logger.debug("Complete process ip ranges")
# 这里出来就直接将ipranges的数据存入了self.hosts这里也进行一下置零 by judy 20201202
ip_ranges = None
return
def process_port(self):
"""
尝试by judy 2020/03/30
将port加入队列,用于zmap多线程取用
顺便统计下一共需要扫描的数据 modify by judy 2020/04/08
:return:
"""
ports = self.task.cmd.stratagyscan.scan.ports
self._logger.debug(f"Get input {len(ports)} scan port")
for p in ports:
self.port_queue.put(p)
# 这里统计所有需要扫描的网段计数集合
self.__all_scan_count = self.port_queue.qsize() * self.__c_ipranges_count
self._logger.debug(f"There are {self.__all_scan_count} objects will be scan")
with open("./scan_rate_test_result.txt", "a", encoding="utf-8") as fp:
fp.write(f"总共有{len(ports)}个端口需要扫描, 总共将有{self.__all_scan_count}个目标需要扫描")
return
def split_long_ip_ranges(self, inputipdata):
"""
拆分长网段1.1.0.0/14 -> 1.1.1.0/24,1.1.2.0/24....
seres.conn = conn
seres.res = res
:return:
"""
self._logger.debug("Start split long ip ranges to C ip ranges")
        if self.country_flag:
            # Country/region data
            ip_ranges = inputipdata.res
            log = "Start processing regional IP data"
            self._logger.debug("Get region ip ranges")
            self._outprglog(log)
        else:
            ip_ranges = inputipdata
            log = "Start processing the specified IP data"
            self._logger.debug("Get specific ipranges")
            self._outprglog(log)
        # Holds intranet (reserved/private) ranges
        intranet = []
        for el in ip_ranges:
            # This branch handles plain string ranges such as 1.1.0.0/24
if isinstance(el, str):
if (
not self.country_flag
and IpProcessTools.judge_reserved_ip_addresses(el)
):
intranet.append(el)
self.__c_ipranges_count += 1
else:
try:
low_ip = IPy.IP(el)
if low_ip.prefixlen() < 24:
count = 0
for ip in low_ip:
if count == 0:
self.__c_ipranges_count += 1
ipstr_list = [str(ip) + "/24"]
self.hosts[tuple(ipstr_list)] = 1
count += 1
if count == 256:
                                    # A full /24 block has been consumed at this point
count = 0
else:
self.__c_ipranges_count += 1
ipstr_list = [el]
self.hosts[tuple(ipstr_list)] = 1
except:
                        # Not an IP range; it may be a domain name or something else
self.__c_ipranges_count += 1
ipstr_list = [el]
self.host[tuple(ipstr_list)] = 1
elif isinstance(el, tuple):
                # Tuples only come from country data, so no deduplication is needed
                # Trying it this way for masscan-style scanning
st = IPy.IP(el[0])
sp = IPy.IP(el[1])
                # Simply appending /24 would be inaccurate, because many returned ranges do not contain a full 255 IPs
                # Such data is handled separately for now, even though that means more IPs to query; good enough for the moment, modify by judy 2020/07/22
if sp.int() - st.int() >= 255:
count = 0
for i in range(st.int(), sp.int() + 1):
if count == 0:
self.__c_ipranges_count += 1
ipstr: str = IPy.IP(i).strNormal()
if ipstr.endswith(".0"):
ipstr_list = [(ipstr + "/24")]
self.hosts[tuple(ipstr_list)] = 1
count += 1
if count == 256:
count = 0
else:
iptmp = []
for i in range(st.int(), sp.int() + 1):
o_ipstr: str = IPy.IP(i).strNormal()
if o_ipstr.endswith(".0"):
continue
iptmp.append(o_ipstr)
                        # This also counts as one IP range; missing this was what broke the progress calculation
self.__c_ipranges_count += 1
self.host[tuple(iptmp)] = 1
else:
raise Exception("Unsupported type")
        # Also scan the intranet (reserved) ranges
if len(intranet) > 0:
for oneport in self.task.cmd.stratagyscan.scan.ports:
with self._file_locker:
nmap_scan_host_path = self.tmppath / f"{str(uuid.uuid1())}"
for shost in intranet:
with nmap_scan_host_path.open("a", encoding="utf-8") as fp:
fp.write(shost + "\n")
self.nmap_queue.put((nmap_scan_host_path, [oneport]))
self._logger.debug(f"本次一共扫描内网{len(intranet)}个目标")
log = f"一共需要扫描{self.__c_ipranges_count}个IP C段"
with open("./scan_rate_test_result.txt", "a", encoding="utf-8") as fp:
fp.write(log + "\n")
self._logger.info(f"Get {self.__c_ipranges_count} ip ranges")
self._outprglog(log)
        # For ranges obtained from a country query, call the completion callback to close the query handle
if self.country_flag:
inputipdata.sedone()
self._logger.debug("Complete split long ip ranges to C ip ranges")
def get_country_iprange(self, countryinfo: dict):
"""
获取国家二字码的ip段
在dbip里面选取相关的数据
:return:
"""
self._logger.debug("Start get local ip ranges result")
ip_rangs = None
try:
country = countryinfo.get("country")
province = countryinfo.get("province")
city = countryinfo.get("city")
geoid = countryinfo.get("citycode")
# sa = requests.session()
            # # Open a session to fetch the home page and pick up request headers and cookies
# sa.get("http://ipblock.chacuo.net/")
            # # Only /24 (class C) networks are handled here; class A and B ranges may be added later
# res = sa.get(f'http://ipblock.chacuo.net/down/t_txt=c_{country_code}')
# res_text = res.text
# ip_rangs = self.re_iprang.findall(res_text)
# self._logger.info(f"Start get {country_code} ip range")
ip_rangs = GeoIPLoc.get_location_ipranges(country, province, city, geoid)
            # This always returns a seres object; the wrapper keeps sqlite from opening too many connections.
            # The code has never failed here, but error handling is kept just in case.
self._logger.info("Complete get local ip ranges result")
except:
self._logger.error(
f"Get country ip rangs error, err:{traceback.format_exc()}"
)
return ip_rangs
def _download_data(self) -> iter:
"""
下载数据接口,最后返回的数据为dict,
这里是数据下载流程的开始,新增暂停功能
modify by judy 2020/06/03
:return:
"""
        # To compute progress, hosts must be processed before ports
self.process_host()
self.process_port()
        # Continuously poll for the stop flag
# t = threading.Thread(target=self._get_stop_sign, name="stop_singn_scan")
# t.start()
        # 1. zmap quickly discovers open ports
        # To run this in threads, the work has to go through queues shared by the long-running threads
mzsq = threading.Thread(
target=self.make_zmap_scan_queue, name="make_zmap_scan_queue"
)
mzsq.start()
for i in range(max_zscan_threads):
t = threading.Thread(target=self.zmap_scan, name=f"zmap_threads{i}")
t.start()
for j in range(max_nscan_threads):
jthread = threading.Thread(target=self.nmap_scan, name=f"scan_threads{j}")
jthread.start()
for m in range(max_zgrab2_threads):
mthread = threading.Thread(
target=self.zgrab2_scan, name=f"zgrab2_threads{m}"
)
mthread.start()
for n in range(max_vulns_threads):
nthreads = threading.Thread(
target=self.vulns_scan, name=f"vulns_threads{n}"
)
nthreads.start()
# t = threading.Thread(target=self._scan_status, name="Monitor scan status")
# t.start()
ossq = threading.Thread(target=self.output_res, name=f"output_result")
ossq.start()
ossq.join()
        # All work is done
        self._running = False
        # Report 100% once the scan completes
        self._logger.info("All scan complete")
        self.task.progress = 1
        self._write_iscantaskback(ECommandStatus.Dealing, "Scan complete: 100%")
        log = f"This IP probing task has finished; {self.output_count} records were collected in total"
        self._outprglog(log)
        return
yield None
def make_zmap_scan_queue(self):
"""
生成zmap扫描的队列,主要是为了保证速率
modify by judy 2020/06/03
如果停止了那么也就不继续制作zmap扫描数据了
port :
host :list []
:return:
"""
self._logger.info("Start make zmap scan data and insert to zmap queue")
self.make_zmap_scan_queue_status = True
save_port_count = None
save_host_count = None
if self.sp.exists():
save_str = self.sp.read_text()
if save_str is not None and save_str != "":
save_list = save_str.split(" ")
save_port_count = int(save_list[0])
save_host_count = int(save_list[1])
        # This resume checkpoint is used only once
host_count = 0
port_count = 0
got = False
while True:
if self.port_queue.empty() or self._stop_sign:
                # Production is finished
self.make_zmap_scan_queue_status = False
self._logger.info("Complete make zmap scan data")
break
got = False
port = self.port_queue.get()
port_count += 1
got = True
if save_port_count is not None and save_port_count > port_count:
continue
elif save_port_count == port_count:
                # Found the port to resume from; clear the checkpoint so it is not reused
self._logger.info(
f"Continue download, skip {save_port_count} port, start from port:{port.port}"
)
save_port_count = None
pass
tmp_hosts = []
try:
                # /24 (class C) ranges
for host in self.hosts.keys():
host_count += 1
if (
save_host_count is not None
and save_host_count > 0
and save_host_count > host_count
):
continue
elif save_host_count == host_count:
self._logger.info(
f"Continue download, skip {save_host_count} host, start from host:{host}"
)
save_host_count = None
pass
                    # Record which host the scan has reached
line = f"{port_count} {host_count-self.zmap_queue.qsize()}"
self.sp.write_text(line)
# self._logger.info(f"Write line:{line}")
                    # Convert the unique tuple key back into a list
host = list(host)
tmp_hosts.extend(host)
if len(tmp_hosts) > max_zscan_ipranges:
while self.zmap_queue.qsize() > max_zscan_threads * 10:
self._logger.debug(
f"Zmap scan queue over {max_zscan_threads*10}, too many objects to scan, wait 10 second"
)
time.sleep(10)
self.zmap_queue.put((tmp_hosts, port))
                        # Reset tmp_hosts
tmp_hosts = []
                # Single IPs or hostnames
for ip in self.host.keys():
host_count += 1
if (
save_host_count is not None
and save_host_count > 0
and save_host_count > host_count
):
continue
elif save_host_count == host_count:
self._logger.info(
f"Continue download, skip {save_host_count} host, start from host:{ip}"
)
save_host_count = None
pass
                    # Record which host the scan has reached
line = f"{port_count} {host_count-self.zmap_queue.qsize()}"
self.sp.write_text(line)
host = list(ip)
tmp_hosts.extend(host)
if len(tmp_hosts) > max_zscan_ip:
while self.zmap_queue.qsize() > max_zscan_threads * 10:
self._logger.debug(
f"Zmap scan queue over {max_zscan_threads*10}, too many objects to scan, wait 10 second"
)
time.sleep(10)
self.zmap_queue.put((tmp_hosts, port))
                        # Reset tmp_hosts
tmp_hosts = []
except:
self._logger.error(
f"Put hosts port to zmap scan queue error\nport:{port.port}\nerror:{traceback.format_exc()}"
)
finally:
                # After finishing, check whether any hosts remain for this port
if len(tmp_hosts) > 0:
self.zmap_queue.put((tmp_hosts, port))
if got:
self.port_queue.task_done()
        # Manually release the dicts once this function completes, by swm 20201012
self.hosts = None
self.host = None
def _make_back_progress(self):
"""
扫描进度的回馈
这个扫描进度,不应该考虑到扫描整个国家的情况
那就直接在imap里面算,
但是这个东西是并行的怎么算
唉,先写着用用吧,by Judy 2020/04/07
:return:
"""
try:
progress = round(self.__has_scan_count / self.__all_scan_count, 2)
if progress - self.__progress > 0.001:
                # This progress estimate is not accurate and can sometimes exceed 1
if progress > 1.0:
progress = 0.999
self.__progress = progress
self.task.progress = progress
self._logger.info(f"Scan progress:{float(progress * 100)}%")
self._outprglog(f"正在扫描:{float(progress * 100)}%")
self._write_iscantaskback(
ECommandStatus.Dealing, f"正在扫描:{float(progress * 100)}%"
)
except:
self._logger.error(f"Make progress error, err:{traceback.format_exc()}")
return
def _is_complete(self, scan_queue, scan_thread_state) -> bool:
"""
这里判断任务是否完成,程序全部执行完成了返回true
任务没有执行完成返回false
modify by judy 2020/06/03
如果任务被中途暂停了那么直接停止
:return:
"""
if self._stop_sign:
return True
complete = False
        # An empty dict means the upstream threads have not started yet
if len(scan_thread_state) == 0:
return complete
if scan_queue.empty() and True not in scan_thread_state.values():
            # The queue is empty and no task is currently being processed
complete = True
return complete
def zmap_scan(self):
"""
使用zmap来发现存活的端口
zmap和masscan的效果类似,现在尝试使用masscan来扫描
换了试试
:return:
"""
        # Unique id of the current thread; scanning starts as soon as we enter
ident = threading.current_thread().ident
cur_state = True
self.__zmap_scan_thread_state[ident] = cur_state
got = False
while True:
            # Finished running
if (
not self.make_zmap_scan_queue_status and self.zmap_queue.empty()
) or self._stop_sign:
                # All ports have been scanned
cur_state = False
self.__zmap_scan_thread_state[ident] = cur_state
self._logger.info(f"Zmap {ident} Scan complete")
break
if self.zmap_queue.empty():
time.sleep(1)
continue
got = False
hosts, t_port = self.zmap_queue.get()
got = True
            # Check here whether the entries are domains
new_host, ip_domain_dict = IpProcessTools.judge_ip_or_domain(hosts)
if len(new_host) == 0:
self._logger.debug(f"Get no live hosts")
continue
log = f"开始探测{len(hosts)}个主机存活和端口开放情况, PORT:{t_port.port}, Protocol:{t_port.flag}"
self._outprglog(log)
            # zmap scans quickly each round, so we can hand it larger IP ranges; the command line cannot hold that many, so pass them via a file and remember to create and delete it, by judy 2020/08/20
with self._file_locker:
zmap_scan_host_path = self.tmppath / f"{str(uuid.uuid1())}"
for shost in new_host:
with zmap_scan_host_path.open("a", encoding="utf-8") as fp:
fp.write(shost + "\n")
self._logger.debug(
f"Start Zmap thread scan an object, zmap thread id: {ident}"
)
try:
for port_info in self.zmap.scan_open_ports(
self.task, 1, zmap_scan_host_path, [t_port]
):
if port_info is None:
continue
ip = port_info._host
o_host = ip_domain_dict.get(ip)
if o_host is not None:
zmapres = o_host
else:
zmapres = ip
with self._nmap_tmp_dict_locker:
self.process_nmap_data(zmapres, t_port)
self.__has_scan_count += 1
except:
self._logger.error(
f"Zmap scan port error\nport:{t_port.port} protocol:{t_port.flag}\nerror:{traceback.format_exc()}"
)
finally:
if got:
self.zmap_queue.task_done()
if zmap_scan_host_path.exists():
zmap_scan_host_path.unlink()
self._logger.debug(f"Zmap {ident} complete scan an object")
        # After breaking out of the loop
if True not in self.__zmap_scan_thread_state.values():
with self._nmap_tmp_dict_locker:
self.process_nmap_data(None, None, True)
    def process_nmap_data(self, ip, port, zscan_stop_flag=False):
        """
        Process data destined for nmap.
        Accumulate results until a batch reaches a certain size before handing it to the
        nmap scan, which keeps the nmap scan rate fairly even.
        """
if ip is not None and port is not None:
if self._nmap_tmp.__contains__(port):
self._nmap_tmp[port].append(ip)
if len(self._nmap_tmp.get(port)) >= max_nmap_ip:
with self._file_locker:
nmap_scan_path = self.tmppath / f"{str(uuid.uuid1())}"
with nmap_scan_path.open("a", encoding="utf-8") as fp:
fp.writelines([ip + "\n" for ip in self._nmap_tmp.get(port)])
                    # Do not let the queue build up too much
while self.nmap_queue.qsize() > max_nscan_threads * 10:
self._logger.debug(
f"Nmap scan queue over {max_nscan_threads*10}, too many objects to scan, wait 20 seconds"
)
time.sleep(10)
self.nmap_queue.put((nmap_scan_path, port))
                    # Pop the entry once it has been queued
self._nmap_tmp.pop(port)
else:
self._nmap_tmp[port] = [ip]
        # Check whether the zmap stage has finished
        if zscan_stop_flag:
for port, ipranges in self._nmap_tmp.items():
with self._file_locker:
nmap_scan_path = self.tmppath / f"{str(uuid.uuid1())}"
with nmap_scan_path.open("a", encoding="utf-8") as fp:
fp.writelines([ip + "\n" for ip in ipranges])
self.nmap_queue.put((nmap_scan_path, port))
            # Once all pending nmap batches have been queued, clear this buffer
self._nmap_tmp = {}
return
def nmap_scan(self):
"""
nmap
这里会使用多线程去处理已经查到开放了的端口
:return:
"""
ident = threading.current_thread().ident
cur_state = True
self.__nmap_scan_thread_state[ident] = cur_state
got = False
while True:
            # Exit when the scan is complete
if self._is_complete(self.nmap_queue, self.__zmap_scan_thread_state):
cur_state = False
self.__nmap_scan_thread_state[ident] = cur_state
self._logger.info(f"Nmap {ident} scan complete")
break
            # Check whether there is anything left in the queue
if self.nmap_queue.empty():
time.sleep(1)
continue
got = False
ips_path, port = self.nmap_queue.get()
got = True
log = f"开始探测主机协议: PORT:{port.port} protocol:{port.flag}"
self._logger.info(
f"Start nmap {ips_path.as_posix()}, port:{port.port} protocol:{port.flag}"
)
self._outprglog(log)
try:
tmp_zgrab2_dict = {}
self._logger.debug(f"Start Nmap scan an object, nmap thread id:{ident}")
for portinfo in self.nmap.scan_open_ports_by_file(
self.task, 1, ips_path.as_posix(), [port], outlog=self._outprglog
):
if not isinstance(portinfo, PortInfo):
continue
                    # Everything coming out of here is for a single port; luckily it is deduplicated here
tmp_zgrab2_dict[portinfo._host] = portinfo
if len(tmp_zgrab2_dict) > 0:
while self.zgrab2_queue.qsize() > max_zgrab2_threads * 10:
                        # Periodically check how the queue is being drained
self._logger.debug(
f"Nmap Threading id:{ident},Zgrab2 scan queue over {max_zgrab2_threads*10}, too many objects to scan, wait 20 second"
)
time.sleep(20)
self.zgrab2_queue.put((tmp_zgrab2_dict, port))
log = f"探测主机协议完成:{list(tmp_zgrab2_dict.keys())}"
self._logger.info(
f"Get nmap result {tmp_zgrab2_dict.__len__()} ips and put into zgrab2"
)
self._outprglog(log)
except:
self._logger.error(
f"Nmap scan port info error, id:{ident}, err:{traceback.format_exc()}"
)
finally:
if got:
self.nmap_queue.task_done()
                # Report progress
self._make_back_progress()
                # Delete the temporary file
try:
if ips_path.exists():
ips_path.unlink()
except:
self._logger.error(
f"Delete zmap res path error, err:{traceback.format_exc()}"
)
self._logger.debug(
f"Complete Nmap scan an object, nmap thread id:{ident}"
)
def zgrab2_scan(self):
"""
zgrab2扫描
port:PortInfo
:return:
"""
        # Unique id of the current thread; scanning starts as soon as we enter
ident = threading.current_thread().ident
cur_state = True
self.__zgrab2_scan_thread_state[ident] = cur_state
got = False
while True:
            # Finished running
if self._is_complete(self.zgrab2_queue, self.__nmap_scan_thread_state):
cur_state = False
self.__zgrab2_scan_thread_state[ident] = cur_state
self._logger.info(f"Zgrab2 {ident} scan complete")
break
if self.zgrab2_queue.empty():
time.sleep(0.1)
continue
got = False
portinfo_dict, port = self.zgrab2_queue.get()
got = True
log = f"开始协议详情探测:{list(portinfo_dict.keys())}"
self._logger.info(
f"Start zgrab2 scan {len(portinfo_dict)} ips, zgrab2 thread id:{ident}"
)
self._outprglog(log)
try:
self._scan_application_protocol(1, portinfo_dict, port)
self._logger.debug(f"Complete Zgrab2 scan, zgrab2 thread id:{ident}")
for portinfo in portinfo_dict.values():
while self.vulns_queue.qsize() > max_vulns_threads * 10:
self._logger.debug(
f"Zgrab2 threading id:{ident},Vulns scan queue over {max_vulns_threads*10} objects, too many data, wait 20 second"
)
time.sleep(20)
self.vulns_queue.put(portinfo)
log = f"协议详情探测: 获取到{len(portinfo_dict)}个结果"
self._logger.info(f"Put {len(portinfo_dict)} objects to vuls queue")
self._outprglog(log)
except Exception as err:
self._logger.error(f"Zgrab2 scans error, err:{err}")
finally:
                # Manually release the dict object
if got:
portinfo_dict = None
self.zgrab2_queue.task_done()
def _scan_application_protocol(self, level: int, port_info_dict, port):
"""
根据 portinfo 的协议类型,扫描其应用层协议
增加效率,每次扫描一个网段的数据,不再去扫描一个单一的那样太慢了
"""
try:
            # This port is directly tied to the port processed at the very beginning, so just take the first one, modify by judy
            port = port.port
            # Run a first pass:
if port != 80:
self.zgrab2.get_tlsinfo(
self.task, level, port_info_dict, port, outlog=self._outprglog
)
tmpdict = {}
            # Ports without an identified protocol
portdict = {}
            # Group by protocol
for k, v in port_info_dict.items():
                # k is the ip, v is the portinfo
service = v.service
if service is not None:
ser_dict = tmpdict.get(service)
                    # Check whether a dict for this protocol type already exists
if ser_dict is None:
tmpdict[service] = {}
                    # Add the entry
tmpdict[service][k] = v
else:
                    # Dict for entries that only have a port and no protocol
portdict[k] = v
            # Scan application-layer protocols for entries that only have a port
            self._scan_port_application(level, portdict, port)
            # Take each protocol group and scan it
for service, service_dict in tmpdict.items():
if service == "ftp":
self.zgrab2.get_ftp_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "ssh":
self.zgrab2.get_ssh_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "telnet":
self.zgrab2.get_telnet_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "smtp":
self.zgrab2.get_smtp_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service.__contains__("http") or service.__contains__("tcpwrapped"):
self.zgrab2.get_siteinfo(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "pop3":
self.zgrab2.get_pop3_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "ntp":
self.zgrab2.get_ntp_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "imap":
self.zgrab2.get_imap_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "mssql":
self.zgrab2.get_mssql_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "redis":
self.zgrab2.get_redis_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "mongodb":
self.zgrab2.get_mongodb_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "mysql":
self.zgrab2.get_mysql_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
elif service == "oracle":
self.zgrab2.get_oracle_info(
self.task, level, service_dict, port, outlog=self._outprglog
)
else:
                    # Protocols not covered above
self._scan_port_application(level, service_dict, port)
except:
self._logger.error(
"Scan ip port application protocol error:\ntaskid:{}\nerror:{}".format(
self.task.taskid, traceback.format_exc()
)
)
def _scan_port_application(self, level, portdict, port):
"""
这个主要是为了扫描一些没有协议的端口,或者协议没在上面那个方法里的
:return:
"""
if len(portdict) == 0:
return
if port == 21:
self.zgrab2.get_ftp_info(self.task, level, portdict, port)
elif port == 22:
self.zgrab2.get_ssh_info(self.task, level, portdict, port)
elif port == 23:
self.zgrab2.get_telnet_info(self.task, level, portdict, port)
elif port == 25 or port == 465:
self.zgrab2.get_smtp_info(self.task, level, portdict, port)
elif port == 80 or port == 443:
self.zgrab2.get_siteinfo(self.task, level, portdict, port)
elif port == 110 or port == 995:
self.zgrab2.get_pop3_info(self.task, level, portdict, port)
elif port == 123:
self.zgrab2.get_ntp_info(self.task, level, portdict, port)
elif port == 143 or port == 993:
self.zgrab2.get_imap_info(self.task, level, portdict, port)
elif port == 1433:
self.zgrab2.get_mssql_info(self.task, level, portdict, port)
elif port == 6379:
self.zgrab2.get_redis_info(self.task, level, portdict, port)
elif port == 27017:
self.zgrab2.get_mongodb_info(self.task, level, portdict, port)
elif port == 3306:
self.zgrab2.get_mysql_info(self.task, level, portdict, port)
elif port == 1521:
self.zgrab2.get_oracle_info(self.task, level, portdict, port)
def vulns_scan(self):
"""
漏洞扫描,回去扫描某个ip的具体页面
由于是http连接非常耗性能,因此页面做了勾选和筛选
"""
        # Unique id of the current thread; scanning starts as soon as we enter
ident = threading.current_thread().ident
cur_state = True
self.__vulns_scan_thread_state[ident] = cur_state
got = False
while True:
            # Finished running
if self._is_complete(self.vulns_queue, self.__zgrab2_scan_thread_state):
cur_state = False
self.__vulns_scan_thread_state[ident] = cur_state
self._logger.info(f"Vulns {ident} scan complete")
break
if self.vulns_queue.empty():
time.sleep(0.1)
continue
got = False
portinfo = self.vulns_queue.get()
got = True
if len(self._vulns_list) > 0:
log = f"开始漏洞扫描: {self._vulns_list}"
self._logger.debug(
f"Start vulns scan {self._vulns_list}, vulns threading id:{ident}"
)
self._outprglog(log)
self.logicalgrabber.grabbanner(
portinfo, self._vulns_list, flag="iscan", outlog=self._outprglog
)
try:
self.output_res_queue.put(portinfo)
except Exception as err:
self._logger.error(f"vulns scan error, err:{err}")
finally:
if got:
self.vulns_queue.task_done()
if len(self._vulns_list) > 0:
self._logger.debug(
f"Stop vulns scan {self._vulns_list}, vulns threading id:{ident}"
)
def output_res(self):
"""
结果输出线程
:return:
"""
self._logger.info(f"Start output result thread")
got = False
while True:
            # Finished
if self._is_complete(self.output_res_queue, self.__vulns_scan_thread_state):
self._logger.info(f"Complete output result thread")
                # On a normal finish, delete this task's scan-progress checkpoint file
self.sp.unlink()
break
if self.output_res_queue.empty():
time.sleep(0.1)
continue
try:
got = False
portinfo: PortInfo = self.output_res_queue.get()
got = True
file_port = portinfo._port
ip = portinfo._host
root: IP = IP(self.task, 1, ip)
root.set_portinfo(portinfo)
geoinfo, org, isp = self.dbip.get_ip_mmdbinfo(level=1, ip=ip)
country_code = "unknown"
if isinstance(geoinfo, GeoInfo):
root.set_geolocation(geoinfo)
country_code = geoinfo._country_code
root.org = org
root.isp = isp
if root._subitem_count() > 0:
out_dict = root.get_outputdict()
                    # Output lock to keep writers from blocking each other; the output path seems to have its own lock already, so test without locking here first
# with self._file_locker:
file_name = f"{country_code}_{file_port}_{int(time.time() * 1000)}"
self._outputdata(out_dict, file_name=file_name)
self.output_count += 1
if isinstance(portinfo, PortInfo):
del portinfo
except:
self._logger.error(f"Output result error: {traceback.format_exc()}")
finally:
if got:
self.output_res_queue.task_done()
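# The scanner above chains several worker pools (zmap -> nmap -> zgrab2 -> vulns -> output)
# through queues, and each stage considers itself finished when the previous stage's queue
# is empty and none of the previous stage's threads report themselves as busy. The snippet
# below is a minimal, self-contained sketch of that completion pattern; all names here are
# illustrative and not taken from the class above.
import queue
import threading

def run_pipeline(items, n_workers=4):
    work_q = queue.Queue()
    out_q = queue.Queue()
    producer_done = threading.Event()
    busy = {}  # thread ident -> currently processing an item?

    def producer():
        for item in items:
            work_q.put(item)
        producer_done.set()

    def worker():
        ident = threading.get_ident()
        busy[ident] = False
        while True:
            # Same completion rule as the scanner: upstream finished and queue drained.
            if producer_done.is_set() and work_q.empty() and not any(busy.values()):
                break
            try:
                item = work_q.get(timeout=0.1)
            except queue.Empty:
                continue
            busy[ident] = True
            try:
                out_q.put(item * 2)  # stand-in for the real scan step
            finally:
                busy[ident] = False
                work_q.task_done()

    threads = [threading.Thread(target=producer)] + [
        threading.Thread(target=worker) for _ in range(n_workers)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return [out_q.get() for _ in range(out_q.qsize())]

# Example: run_pipeline(range(10)) returns each item doubled, in completion order.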
|
Desktop.py
|
from threading import Thread

import headerfile as head

head.create("Nitish", 25)
head.read("Nitish")
head.create("Nitish", 50)
head.modify("Nitish", 55)
head.delete("Nitish")

# Run one of the headerfile operations on a background thread; the key/value below
# are illustrative since the original script left them undefined.
key_name, value = "Nitish", 60
test = Thread(target=head.create, args=(key_name, value))
test.start()
test.join()  # Thread objects have no sleep(); join() waits for the thread to finish
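# Desktop.py assumes a local "headerfile" module exposing create/read/modify/delete.
# That module is not shown in this dump; the sketch below is a minimal, hypothetical
# implementation (a thread-safe in-memory store) so the example above can actually run.
import threading

_store = {}
_lock = threading.Lock()

def create(key, value):
    with _lock:
        _store[key] = value
    print(f"created {key}={value}")

def read(key):
    with _lock:
        value = _store.get(key)
    print(f"read {key}={value}")
    return value

def modify(key, value):
    with _lock:
        if key in _store:
            _store[key] = value
    print(f"modified {key}={value}")

def delete(key):
    with _lock:
        _store.pop(key, None)
    print(f"deleted {key}")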
|
cruzamento-rua.py
|
from threading import Thread
from threading import Semaphore
from time import sleep
from random import randint
import sys
#---------------------------------------------------
def carroA(sA, sB):
while True:
sA.acquire()
        print('Car A passed')
sleep(2)
sB.release()
#---------------------------------------------------
def carroB(sB, sC):
while True:
sB.acquire()
        print('Car B passed')
sleep(2)
sC.release()
#---------------------------------------------------
def carroC(sC, sD):
while True:
sC.acquire()
        print('Car C passed')
sleep(2)
sD.release()
#---------------------------------------------------
def carroD(sD, sA):
while True:
sD.acquire()
        print('Car D passed')
sleep(2)
sA.release()
#---------------------------------------------------
sA = Semaphore(1)
sB = Semaphore(0)
sC = Semaphore(0)
sD = Semaphore(0)
tA = Thread(target=carroA, args=[sA, sB])
tA.start()
tB = Thread(target=carroB, args=[sB, sC])
tB.start()
tC = Thread(target=carroC, args=[sC, sD])
tC.start()
tD = Thread(target=carroD, args=[sD, sA])
tD.start()
tA.join()
tB.join()
tC.join()
tD.join()
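# The four carroX functions above form a fixed ring of semaphores: each thread waits on its
# own semaphore and releases the next one, so the cars pass strictly in order A-B-C-D.
# Below is a minimal sketch of the same idea generalized to N cars; the names, car count,
# and lap count are illustrative and not part of the original script.
from threading import Thread, Semaphore
from time import sleep

def make_ring(n_cars=4, laps=3):
    # Only the first semaphore starts with a permit, so car 0 goes first.
    sems = [Semaphore(1 if i == 0 else 0) for i in range(n_cars)]

    def car(i):
        for _ in range(laps):
            sems[i].acquire()                 # wait for my turn
            print(f'Car {i} passed')
            sleep(0.1)
            sems[(i + 1) % n_cars].release()  # hand the turn to the next car

    threads = [Thread(target=car, args=(i,)) for i in range(n_cars)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

# make_ring()  # uncomment to run a bounded version that actually terminates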
|
driver.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
default=None,
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
default=None,
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 102400
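# 100 KiB (100 * 1024 bytes) cap on the amount of console log data the driver handles.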
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
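# Example lookup: LIBVIRT_POWER_STATE[VIR_DOMAIN_PAUSED] yields power_state.PAUSED; the
# driver uses this table throughout to normalize raw libvirt domain states for Nova.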
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._caps = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self._host_state = None
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"writethrough",
"unsafe",
]
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_("Invalid cachemode %(cache_mode)s specified "
"for disk type %(disk_type)s.") % locals())
continue
self.disk_cachemodes[disk_type] = cache_mode
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherant (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
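            # e.g. a version tuple (0, 9, 6) munges to 9006 and (1, 0, 0) to 1000000,
            # so version tuples can be compared as plain integers.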
try:
if lv_ver is not None:
libvirt_version = self._conn.getLibVersion()
if libvirt_version < _munge_version(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = self._conn.getVersion()
if hypervisor_version < _munge_version(hv_ver):
return False
if hv_type is not None:
hypervisor_type = self._conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager."""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden."""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread."""
if self._event_queue is None:
LOG.debug("Event loop thread is not active, "
"discarding event %s" % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events."""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0."""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
dispatch_thread = eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
libvirt.virEventRegisterDefaultImpl()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.') %
locals())
self._init_events()
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
if not CONF.libvirt_nonblocking:
self._wrapped_conn = self._connect(self.uri(),
self.read_only)
else:
self._wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri(), self.read_only)
try:
LOG.debug("Registering for lifecycle events %s" % str(self))
self._wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception, e:
LOG.warn(_("URI %s does not support events"),
self.uri())
return self._wrapped_conn
_conn = property(_get_connection)
def _test_connection(self):
try:
self._wrapped_conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.notify(nova_context.get_admin_context(),
notifier.publisher_id('compute'),
'compute.libvirt.error',
notifier.ERROR,
payload)
pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
def legacy_nwinfo(self):
return True
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._conn.lookupByID(domain_id)
names.append(domain.name())
except libvirt.libvirtError:
# Instance was deleted while listing... ignore it
pass
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
return [self._conn.lookupByName(name).UUIDString()
for name in self.list_instances()]
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_("Error from libvirt during destroy. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise utils.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise utils.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be a endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise utils.LoopingCallDone()
kwargs = {'is_running': False}
timer = utils.FixedIntervalLoopingCall(_wait_for_destroy, old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during undefine. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.NotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during unfilter. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
# FIXME(wangpan): if the instance is booted again here, such as the
# the soft reboot operation boot it here, it will
# become "running deleted", should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError, e:
LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
) % locals())
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
if not self._initiator and not self._fc_wwnns and not self._fc_wwpns:
msg = _("No Volume Connector found.")
LOG.error(msg)
raise exception.NovaException(msg)
connector = {'ip': CONF.my_ip,
'initiator': self._initiator,
'host': CONF.host}
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception, ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
# NOTE(vish):
LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@exception.wrap_exception()
def attach_interface(self, instance, image_meta, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
self.firewall_driver.setup_basic_filtering(instance,
[(network, mapping)])
cfg = self.vif_driver.get_config(instance, network, mapping,
image_meta)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
raise exception.InterfaceAttachFailed(instance)
@exception.wrap_exception()
def detach_interface(self, instance, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance, network, mapping, None)
try:
self.vif_driver.unplug(instance, (network, mapping))
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if 'architecture' in base.get('properties', {}):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
image_format = 'raw'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
snapshot_name,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE (rmk): libvirt needs to be able to write to the
# temp directory, which is owned nova.
utils.execute('chmod', '777', tmpdir, run_as_root=True)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
if not live_snapshot:
snapshot_backend.snapshot_delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
def _wait_for_block_job(domain, disk_path):
status = domain.blockJobInfo(disk_path, 0)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end and cur != 0 and end != 0:
return False
else:
return True
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while _wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
out_path, image_format)
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
if self._soft_reboot(instance):
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slighty longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
return True
elif old_domid != new_domid:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom, instance=instance)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or over-ride the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
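        """No-op for the libvirt driver."""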
pass
def _enable_hairpin(self, xml):
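        """Enable hairpin mode on the bridge ports backing the guest's
        network interfaces. Hairpin mode lets the bridge forward frames
        back out the port they arrived on (e.g. so an instance can reach
        itself via its own floating IP).
        """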
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
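        """Create a new instance: build its local disk images, generate
        the domain XML, create the domain together with its network and
        block devices, then wait for the guest to report RUNNING.
        """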
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
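        """Read any pending output from the given console pty without
        blocking and return it.
        """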
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
        with open(fpath, 'a+') as fp:
            fp.write(data)
        return fpath
def get_console_output(self, instance):
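        """Return the tail of the instance's console log, preferring a
        file-backed console device and falling back to reading from a
        pty; output is capped at MAX_CONSOLE_BYTES.
        """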
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
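        """Return connection details (host, port) for the instance's
        VNC console.
        """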
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@exception.wrap_exception()
def get_spice_console(self, instance):
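        """Return connection details (host, port, tlsPort) for the
        instance's SPICE console.
        """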
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
return (None, None)
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
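        """Check whether dirpath supports O_DIRECT by creating and
        opening a small test file with the O_DIRECT flag.
        """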
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
        except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
        except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
        if CONF.libvirt_images_type != "lvm":
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
if fs_format:
utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
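        """Create the local files backing the instance: kernel/ramdisk,
        the root disk (unless booted from a volume), ephemeral and swap
        disks, and optionally a config drive; file/key/network injection
        is performed here when configured.
        """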
if not suffix:
suffix = ''
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = instance_types.extract_instance_type(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = instance['os_type']
if not os_type_with_default:
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(blockinfo.get_eph_disk(eph)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
                except exception.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection
elif CONF.libvirt_inject_partition != -2:
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj in ('key', 'net', 'metadata', 'admin_pass', 'files'):
if locals()[inj]:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'), locals(), instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
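        """Build the guest CPU configuration from CONF.libvirt_cpu_mode
        and CONF.libvirt_cpu_model, or return None when no explicit CPU
        configuration should be set.
        """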
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'])
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
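        """Build the list of libvirt storage device configs for the
        guest: root/rescue and ephemeral disks, swap, attached volumes
        and the config drive (or a filesystem mount for LXC), each with
        the configured cache mode applied.
        """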
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping and disk_mapping['root']['dev'] is not None:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = "console=ttyS0"
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
root_device_name)
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
root_device_name)
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance,
network, mapping,
image_meta)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
return guest
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
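        """Generate the libvirt domain XML for the instance; when
        write_to_disk is True the XML is also written to libvirt.xml in
        the instance directory.
        """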
LOG.debug(_("Start to_xml instance=%(instance)s "
"network_info=%(network_info)s "
"disk_info=%(disk_info)s "
"image_meta=%(image_meta)s rescue=%(rescue)s"
"block_device_info=%(block_device_info)s") %
locals())
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
return xml
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = _("Error from libvirt while looking up %(instance_name)s: "
"[Error Code %(error_code)s] %(ex)s") % locals()
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
if xml:
domain = self._conn.defineXML(xml)
domain.createWithFlags(launch_flags)
self._enable_hairpin(domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._conn.lookupByID(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def get_vcpu_total(self):
"""Get vcpu number of physical computer.
:returns: the number of cpu core.
"""
try:
return self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
if vcpus is None:
LOG.debug(_("couldn't obtain the vpu count from domain id:"
" %s") % dom_id)
else:
total += len(vcpus[1])
except libvirt.libvirtError as err:
if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug(_("List of domains returned by libVirt: %s")
% dom_ids)
LOG.warn(_("libVirt can't find a domain with id: %s")
% dom_id)
continue
raise
            # NOTE(gtt116): give a chance to do other tasks.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
# skip dom0
dom_mem = int(self._conn.lookupByID(domain_id).info()[2])
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what
                    # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
        # so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
LOG.debug(_("Trying to get stats for the volume %s"),
bdm['volume_id'])
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
vol_usage.append(dict(volume=bdm['volume_id'],
instance=instance,
rd_req=rd_req,
rd_bytes=rd_bytes,
wr_req=wr_req,
wr_bytes=wr_bytes,
flush_operations=flush_ops))
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_("Getting block stats failed, device might have "
"been detached. Code=%(errcode)s Error=%(e)s")
% locals())
except exception.InstanceNotFound:
LOG.info(_("Could not find domain in libvirt for instance %s. "
"Cannot get block stats for device") % instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.get_disk_over_committed_size_total()
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': _get_disk_available_least()}
return dic
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
src = instance_ref['host']
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(ctxt, instance_ref,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and not is_volume_backed:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance_ref,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual
        # disk size", which is the maximum size of the disk
        # (see qemu-img info path-to-disk).
        # The destination host is considered to have enough disk space
        # when real disk size < available disk size if disk_over_commit
        # is True, otherwise when virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance_ref['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
instance_uuid = instance_ref['uuid']
reason = _("Unable to migrate %(instance_uuid)s: "
"Disk of instance is too large(available"
" on destination host:%(available)s "
"< need:%(necessary)s)")
raise exception.MigrationError(reason=reason % locals())
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openReadonly().getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
        except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
LOG.error(m % locals())
if ret <= 0:
LOG.error(m % locals())
raise exception.InvalidCPUInfo(reason=m % locals())
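    # The three _*_shared_storage_test_file() helpers implement a simple
    # shared storage probe: the destination host creates a temporary file
    # under CONF.instances_path, the source host checks whether it can see
    # the same file, and the destination removes it afterwards.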
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Cannot confirm tmpfile return False."""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance_ref,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance_ref["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, ctxt, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
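            # Turn the configured VIR_MIGRATE_* flag names into their
            # libvirt constants and OR them into a single bitmask.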
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance_ref["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %(e)s") % locals(),
instance=instance_ref)
recover_method(ctxt, instance_ref, dest, block_migration)
# Waiting for completion of live_migration.
timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
timer.stop()
post_method(ctxt, instance_ref, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Create the instance directory on destination compute node.
instance_dir = libvirt_utils.get_instance_path(instance_ref)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance_ref)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance_ref)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously and
        # concurrent requests to iptables make it complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_("plug_vifs() failed %(cnt)d."
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
greenthread.sleep(1)
def pre_block_migration(self, ctxt, instance, disk_info_json):
"""Preparation for block migration."""
# NOTE (rmk): When preparing for a block migration, the instance dir
# should not exist on the destination hypervisor.
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
self._create_images_and_backing(ctxt, instance, disk_info_json)
def _create_images_and_backing(self, ctxt, instance, disk_info_json):
"""
:params ctxt: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
disk_info = jsonutils.loads(disk_info_json)
instance_dir = libvirt_utils.get_instance_path(instance)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=ctxt,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(ctxt, instance)
def post_live_migration_at_destination(self, ctxt,
instance_ref,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
            self.to_xml(instance_ref, network_info, disk_info,
                        block_device_info=block_device_info,
                        write_to_disk=True)
            # The XML produced by to_xml() cannot be used to define the
            # domain directly because it does not include the uuid, so
            # re-define using the XML reported by libvirt itself.
dom = self._lookup_by_name(instance_ref["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None):
"""Preparation block migration.
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = _("Error from libvirt while getting description of "
"%(instance_name)s: [Error Code %(error_code)s] "
"%(ex)s") % locals()
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
if disk_type != 'file':
LOG.debug(_('skipping %(path)s since it looks like a volume') %
locals())
continue
if not path:
LOG.debug(_('skipping disk for %(instance_name)s as it'
' does not have a path') %
locals())
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
else:
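# Raw images have no backing file; reporting virt_disk_size as 0
# means get_disk_over_committed_size_total() counts them as not
# over-committed.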
backing_file = ""
virt_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
# Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
i_vt_sz = int(info['virt_disk_size'])
i_dk_sz = int(info['disk_size'])
disk_over_committed_size += i_vt_sz - i_dk_sz
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
locals())
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
# NOTE(gtt116): yield so other greenthreads get a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first."""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'])
disk_info = jsonutils.loads(disk_info_text)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
# Copy disks to the destination. Rename the instance dir to
# <name>_resize first so this also works when the instance dir
# lives on shared storage (e.g. NFS).
same_host = (dest == self.get_host_ip_addr())
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
if same_host:
dest = None
utils.execute('mkdir', '-p', inst_base)
else:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
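# qemu-img convert flattens the qcow2 chain, so the destination
# does not need the backing file from the local image cache.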
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if same_host:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
LOG.debug(_("Starting finish_migration"), instance=instance)
# Resize disks; only "disk" and "disk.local" need resizing.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
# If we have a non-partitioned image that we can extend,
# ensure it is in 'raw' format so the file system can be extended.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
disk.extend(info['path'], size)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
# assume _create_image does nothing if the target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert."""
shutil.rmtree(inst_base)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
if os.path.exists(inst_base):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
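# domain.vcpus() returns (vcpu_info, cpumap); each vcpu_info entry is
# (number, state, cpu_time, cpu), hence the [2] index below.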
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessibility: '
            '%(instance_path)s')
          % locals())
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first."""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
data = {}
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
disk_info_dict = self.driver.get_local_gb_info()
data["disk_total"] = disk_info_dict['total']
data["disk_used"] = disk_info_dict['used']
data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["supported_instances"] = \
self.driver.get_instance_capabilities()
self._stats = data
return data
|
uart_provider.py
|
import os
import re
import sys
import time
import json
import binascii
import math
# import asyncio
import datetime
import threading
import struct
from ...framework.utils import helper
from ...framework.utils import resource
from ..base import OpenDeviceBase
from ...framework.context import APP_CONTEXT
from ..decorator import with_device_message
from ...framework.configuration import get_config
from ..upgrade_workers import FirmwareUpgradeWorker
from ..upgrade_center import UpgradeCenter
from ..parsers.open_field_parser import encode_value
class Provider(OpenDeviceBase):
'''
INS2000 UART provider
'''
def __init__(self, communicator, *args):
super(Provider, self).__init__(communicator)
self.type = 'INS2000'
self.server_update_rate = 50
self.is_logging = False
self.is_mag_align = False
self.bootloader_baudrate = 460800
self.device_info = None
self.app_info = None
self.app_config_folder = ''
self.parameters = None
self.enable_data_log = True
self.data_folder_path = None
self.prepare_folders()
self.is_backup = False
self.is_restore = False
self.is_app_matched = False
self.connected = True
self.raw_log_file = None
self.best_gnss_pos = None
self.inspvax = None
self.gps_week = 0
self.gps_seconds = 0
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
config_file_name = 'INS2000.json'
self.data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(self.data_folder_path):
os.makedirs(self.data_folder_path)
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name, 'INS2000')
config_file_path = os.path.join(
self.setting_folder_path, config_file_name)
if not os.path.isfile(config_file_path):
if not os.path.isdir(self.setting_folder_path):
os.makedirs(self.setting_folder_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name, os.path.join('INS2000', config_file_name))
with open(config_file_path, "wb") as code:
code.write(app_config_content)
@property
def is_in_bootloader(self):
''' Check if the connected device is in bootloader mode
'''
return False
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
return '# Connected {0} #\n\rDevice: {1} \n\rFirmware: {2}'\
.format(self.type, device_info, app_info)
def _build_device_info(self, text):
'''
Build device info
'''
def _build_app_info(self, text):
'''
Build app info
'''
def load_properties(self):
'''
load properties
'''
# Load config from user working path
local_config_file_path = os.path.join(os.getcwd(), 'INS2000.json')
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
# Otherwise load the bundled INS2000.json from the setting folder
app_file_path = os.path.join(
self.setting_folder_path, 'INS2000.json')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
def after_setup(self):
setupcommands = self.properties["setupcommands"]
if self.data_folder_path is not None:
dir_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
file_time = time.strftime(
"%Y_%m_%d_%H_%M_%S", time.localtime())
file_name = self.data_folder_path + '/' + 'ins2000_log_' + dir_time
os.mkdir(file_name)
self.raw_log_file = open(
file_name + '/' + 'raw_' + file_time + '.bin', "wb")
# self.communicator.flushInput()
for cmd in setupcommands:
self.communicator.write(cmd.encode())
time.sleep(0.01)
def after_bootloader_switch(self):
self.communicator.serial_port.baudrate = self.bootloader_baudrate
def on_read_raw(self, data):
if self.raw_log_file is not None:
self.raw_log_file.write(data)
def on_receive_output_packet(self, packet_type, data, *args, **kwargs):
'''receive output packet'''
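# Numeric packet types are binary log message IDs; 1429, 1465 and 1462
# appear to follow NovAtel-style numbering (BESTGNSSPOS, INSPVAX and a
# raw IMU log respectively), while 'nmea' carries ASCII NMEA sentences.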
# print(type(packet_type))
if type(packet_type) == int:
self.gps_week = data['header_gps_week']
self.gps_seconds = data['header_gps_seconds']
if packet_type == 1429:
if data['lat'] != 0.0 and data['lon'] != 0.0:
# print(packet_type, data)
self.best_gnss_pos = data
self.output_pos()
if packet_type == 1465:
# print(packet_type, data)
if data['lat'] != 0.0 and data['lon'] != 0.0:
# print(packet_type, data)
self.inspvax = data
self.output_pos()
if packet_type == 1462:
self.output_imu(data)
if packet_type == 'nmea':
self.output_nmea(data)
def output_pos(self):
'''output pos'''
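# Fuse the GNSS-only solution (best_gnss_pos) with the INS solution
# (inspvax); a combined 'pos' packet is emitted only when both refer to
# the same GPS week / millisecond-of-week epoch.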
if self.best_gnss_pos is not None and \
self.inspvax is not None and \
self.best_gnss_pos['header_gps_week'] == self.inspvax['header_gps_week'] and \
self.best_gnss_pos['header_gps_seconds'] == self.inspvax['header_gps_seconds']:
# print(self.best_gnss_pos, self.inspvax)
pos_data = {}
pos_data['GPS_Week'] = self.inspvax['header_gps_week']
pos_data['GPS_TimeofWeek'] = self.inspvax['header_gps_seconds'] * 0.001
pos_data['positionMode'] = self.getpostype(self.inspvax['pos_type'])
pos_data['hdop'] = 1.0
pos_data['age'] = self.best_gnss_pos['diff_age']
pos_data['numberOfSVs'] = self.best_gnss_pos['soln_svs']
pos_data['latitude'] = self.inspvax['lat']
pos_data['longitude'] = self.inspvax['lon']
pos_data['height'] = self.inspvax['hgt'] + self.inspvax['undulation']
pos_data['velocityMode'] = 1
if pos_data['positionMode'] != 1 and \
pos_data['positionMode'] != 4 and \
pos_data['positionMode'] != 5:
pos_data['velocityMode'] = 2
pos_data['insStatus'] = self.inspvax['ins_status']
pos_data['insPositionType'] = pos_data['positionMode']
pos_data['roll'] = self.inspvax['roll']
pos_data['pitch'] = self.inspvax['pitch']
pos_data['velocityNorth'] = self.inspvax['north_velocity']
pos_data['velocityEast'] = self.inspvax['east_velocity']
pos_data['velocityUp'] = self.inspvax['up_velocity']
pos_data['latitude_std'] = self.inspvax['lat_sigma']
pos_data['longitude_std'] = self.inspvax['lon_sigma']
pos_data['height_std'] = self.inspvax['hgt_sigma']
pos_data['north_vel_std'] = self.inspvax['north_velocity_sigma']
pos_data['east_vel_std'] = self.inspvax['east_velocity_sigma']
pos_data['up_vel_std'] = self.inspvax['up_velocity_sigma']
self.add_output_packet('pos', pos_data)
def output_imu(self, imudata):
'''output imu'''
imu_data = {}
imu_data['GPS_Week'] = imudata['header_gps_week']
imu_data['GPS_TimeOfWeek'] = imudata['header_gps_seconds'] * 0.001
imu_data['x_accel'] = imudata['x_accel']
imu_data['y_accel'] = imudata['y_accel']
imu_data['z_accel'] = imudata['z_accel']
imu_data['x_gyro'] = imudata['x_gyro']
imu_data['y_gyro'] = imudata['y_gyro']
imu_data['z_gyro'] = imudata['z_gyro']
self.add_output_packet('imu', imu_data)
def getpostype(self, position_type):
"""get position type"""
positions = {
'16': 1,
'53': 1,
'17': 2,
'54': 2,
'50': 4,
'56': 4,
'55': 5,
'34': 5,
}
return positions.get(str(position_type), 0)
def output_nmea(self, data):
if 'GSV' in data:
self._output_gsv(data)
def _output_gsv(self, gsv):
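# Parse an NMEA xxGSV sentence: strip the checksum, skip the four
# header fields (talker/sentence id, message count, message number,
# satellites in view), then read satellites in groups of four fields
# (PRN, elevation, azimuth, SNR).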
idx = gsv.find('*')
if idx < 0:
return
gsv_arr = gsv[:idx].split(',')
snr_arr = gsv_arr[4:]
if len(snr_arr) % 4 != 0:
return
sys_tag = gsv_arr[0][1:3]
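# Map NMEA talker IDs to numeric GNSS system ids:
# GP=GPS, GL=GLONASS, GA=Galileo, GQ=QZSS, BD=BeiDou.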
sys_tabs = {
'GP': 0,
'GL': 1,
'GA': 2,
'GQ': 3,
'BD': 4
}
num = int(len(snr_arr) / 4)
snr = []
for i in range(num):
snr.append({
'GPS_Week': self.gps_week,
'GPS_TimeOfWeek': int(self.gps_seconds * 0.001),
'satelliteId': snr_arr[i * 4 + 0],
'systemId': sys_tabs.get(sys_tag),
'antennaId': 0,
'elevation': snr_arr[i * 4 + 1],
'azimuth': snr_arr[i * 4 + 2],
'l1cn0': snr_arr[i * 4 + 3],
'l2cn0': 0
})
self.add_output_packet('snr', snr)
self.add_output_packet('skyview', snr)
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {
'packetType': 'ping',
'data': {'status': '1'}
}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name', 'value': 'INS2000'},
{'name': 'IMU', 'value': ''},
{'name': 'PN', 'value': ''},
{'name': 'Firmware Version',
'value': ''},
{'name': 'SN', 'value': ''},
{'name': 'App Version', 'value': ''}
]
}
def get_log_info(self):
'''
Build information for log
'''
return {
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': []
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=unused-argument
'''
Get all parameters
'''
parameter_values = []
yield {
'packetType': 'inputParams',
'data': parameter_values
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=unused-argument
'''
Get parameter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
# self.communicator.write(command_line)
# result = self.get_input_result('gP', timeout=1)
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if error:
yield {
'packetType': 'error',
'data': 'No Response'
}
if data:
self.parameters = data
yield {
'packetType': 'inputParam',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=unused-argument
'''
Update parameter values
'''
input_parameters = self.properties['userConfiguration']
grouped_parameters = {}
for parameter in params:
exist_parameter = next(
(x for x in input_parameters if x['paramId'] == parameter['paramId']), None)
if exist_parameter:
has_group = grouped_parameters.__contains__(
exist_parameter['category'])
if not has_group:
grouped_parameters[exist_parameter['category']] = []
current_group = grouped_parameters[exist_parameter['category']]
current_group.append(
{'paramId': parameter['paramId'], 'value': parameter['value'], 'type': exist_parameter['type']})
for group in grouped_parameters.values():
message_bytes = []
for parameter in group:
message_bytes.extend(
encode_value('int8', parameter['paramId'])
)
message_bytes.extend(
encode_value(parameter['type'], parameter['value'])
)
# print('parameter type {0}, value {1}'.format(
# parameter['type'], parameter['value']))
# result = self.set_param(parameter)
command_line = helper.build_packet(
'uB', message_bytes)
# for s in command_line:
# print(hex(s))
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=unused-argument
'''
Update parameter value
'''
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
# self.communicator.write(command_line)
# result = self.get_input_result('uP', timeout=1)
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
# self.communicator.write(command_line)
# result = self.get_input_result('sC', timeout=2)
result = yield self._message_center.build(command=command_line, timeout=2)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': error
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
def upgrade_framework(self, params, *args): # pylint: disable=unused-argument
'''
Upgrade framework
'''
file = ''
if isinstance(params, str):
file = params
if isinstance(params, dict):
file = params['file']
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
thread = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thread.start()
# print("Upgrade OpenRTK firmware started at:[{0}].".format(
# datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
|
test_logging.py
|
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
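# The default pattern matches lines such as 'spam.eggs -> INFO: 3':
# log_format above plus the auto-incrementing message number produced
# by next_message().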
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args, encoding="utf-8")
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt', encoding='utf-8'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
threading_helper.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
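        # In setUp() the MemoryHandler was created with capacity=10 and
        # flushLevel=logging.WARNING, so a flush happens either when a record
        # at WARNING or above arrives or when the buffer reaches 10 records.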
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
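        # Each record handled by the mock target below spawns a thread that
        # calls setTarget(None) on the MemoryHandler, so flush() may see its
        # target disappear mid-call; the test passes as long as no exception
        # escapes from the logging calls.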
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
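    # Note: config8 is passed through str.format() in test_config8_ok to fill
    # in {tempfile}; the doubled braces in the kwargs line are how literal
    # braces survive that formatting step.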
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, encoding="utf-8", **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
        # config2 contains a subtle error (a bad stream argument) and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
        # config3 contains a less subtle error (a misspelled formatter name) and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # Replace single backslashes with double backslashes on Windows
            # to avoid a unicode escape error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
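            # If these defaults were subject to ConfigParser interpolation,
            # the '%(...)s' placeholders in the nested formatter config would
            # likely break parsing; the call below is expected to accept the
            # defaults untouched (see bpo-33802).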
logging.config.fileConfig(
fn,
encoding="utf-8",
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
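        # SocketHandler frames each record as a 4-byte big-endian length
        # prefix followed by a pickled dict of the LogRecord's attributes;
        # the loop below reassembles that framing, roughly:
        #     slen = struct.unpack(">L", data[:4])[0]
        #     record = logging.makeLogRecord(pickle.loads(data[4:4 + slen]))
        # (where 'data' stands for a fully buffered payload)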
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
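        # SocketHandler backs off between connection attempts: after a failed
        # send it sets retryTime to now + the current delay (starting at
        # retryStart and growing by retryFactor up to retryMax), and it will
        # not try to reconnect before that time, which the assertions below
        # rely on.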
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
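        # DatagramHandler sends one pickled LogRecord dict per UDP packet,
        # prefixed (as with SocketHandler) by a 4-byte length; since the
        # datagram itself delimits the record, the prefix is simply skipped.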
        slen = struct.pack('>L', 0) # dummy value; only its 4-byte length is used to skip the prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
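        # The expected b'<11>' prefix is the syslog PRI value:
        # facility LOG_USER (1) * 8 + severity LOG_ERR (3) = 11; the body is
        # UTF-8 encoded and, by default, NUL-terminated.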
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
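        # HTTPHandler URL-encodes the record's attribute dict (mapLogRecord):
        # for GET the data travels in the query string, for POST in the
        # request body, which is why both branches below decode with parse_qs.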
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: the py.warnings logger has no handlers yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
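    # config9a and config9b are incremental configs ('incremental': True), so
    # dictConfig only adjusts the levels of the existing handler and logger
    # instead of creating new objects; test_config_9_ok applies them in turn.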
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
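    # A filter configured with just 'name' becomes logging.Filter(name), which
    # passes records logged by that logger or its descendants (for example
    # 'compiler.parser.codegen') and rejects everything else.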
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
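    # cfg:// references are resolved against this same dictionary, so
    # 'cfg://true_formatters' expands to the true_formatters mapping and
    # 'cfg://handler_configs[hand1]' to its 'hand1' entry before the
    # configuration is processed.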
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
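    # The special '.' key lists extra attributes that dictConfig sets on the
    # constructed handler after instantiation; test_config14_ok checks that
    # both 'foo' and the overridden 'terminator' end up on the handler.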
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
        # config2 contains a subtle error (a bad stream target) and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
        # config2a contains a misspelt level on a handler and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
        # config2b contains a misspelt level on a logger and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
        # config3 references a misspelled formatter name and should fail to load.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
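        # logging.config.listen() expects each configuration to arrive as a
        # 4-byte big-endian length prefix followed by the config bytes (JSON
        # for dictConfig, ini-style text for fileConfig); that framing is
        # built below with struct.pack('>L', len(text)).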
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
        # No exception should be raised, since 'validate' is configured as False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
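        # convert() resolves cfg:// paths against the dict above: dotted
        # components select keys and [key] components index sequences or
        # mappings, e.g. 'cfg://nest1[1][0]' -> d['nest1'][1][0] == 'h'.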
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
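        # QueueHandler.prepare() formats the record before enqueueing it,
        # merging any args into msg and setting record.message, so the
        # dequeued record already carries the fully formatted text.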
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback only appears once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
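# The stress tests below are only defined when logging.handlers.QueueListener
# is available; they also use multiprocessing queues and unittest.mock.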
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
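# A minimal fixed-offset tzinfo representing UTC, used by the Formatter time
# formatting tests below.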
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
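# Mixin that checks the exact exception message produced by a callable.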
class AssertErrorMessage:
def assert_error_message(self, exception, message, *args, **kwargs):
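        # assertRaises is given an empty exception tuple, so a raised exception
        # is never matched there: it propagates to the except clause below,
        # where its message is compared. If nothing is raised, assertRaises
        # itself fails the test.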
try:
self.assertRaises((), *args, **kwargs)
except exception as e:
self.assertEqual(message, str(e))
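# Formatter behaviour for %-, {}- and $-style formats: substitution,
# usesTime(), format-string validation, the defaults= parameter and
# time formatting.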
class FormatterTest(unittest.TestCase, AssertErrorMessage):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrect format strings
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid format: invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"invalid format: bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatch braces
self.assert_error_message(
ValueError,
"invalid format: expected '}' before end of string",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: Single '}' encountered in format string",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for mismatch bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatch style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
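# logging.exception() must capture the formatted traceback in exc_text and,
# with stack_info=True, the current stack in stack_info.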
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
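# Behaviour of the logging.lastResort handler and of the one-time
# 'No handlers could be found' warning when the root logger has no handlers.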
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
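# Test doubles: FakeHandler records which of acquire/flush/close/release were
# called; RecordingHandler keeps every record passed to handle().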
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
        # create live weakrefs to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# bpo-20037: Doing text I/O late at interpreter shutdown must not crash
code = textwrap.dedent("""
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()
""")
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_logging_at_shutdown_open(self):
# bpo-26789: FileHandler keeps a reference to the builtin open()
# function to be able to open or reopen the file during Python
# finalization.
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
code = textwrap.dedent(f"""
import builtins
import logging
class A:
def __del__(self):
logging.error("log in __del__")
# basicConfig() opens the file, but logging.shutdown() closes
# it at Python exit. When A.__del__() is called,
# FileHandler._open() must be called again to re-open the file.
logging.basicConfig(filename={filename!r}, encoding="utf-8")
a = A()
# Simulate the Python finalization which removes the builtin
# open() function.
del builtins.open
""")
assert_python_ok("-c", code)
with open(filename, encoding="utf-8") as fp:
self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")
def test_recursion_error(self):
# Issue 36272
code = textwrap.dedent("""
import logging
def rec():
logging.error("foo")
rec()
rec()
""")
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
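# LogRecord details: str() representation, dict-style args, processName
# handling with and without multiprocessing, and the optional
# thread/process/processName attributes.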
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
del sys.modules['multiprocessing']
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
            # In other processes, processName is correct when multiprocessing is imported,
# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', encoding='utf-8')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
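# LoggerAdapter must forward calls (including exc_info) to the wrapped logger,
# report isEnabledFor()/hasHandlers() correctly, and support nested adapters.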
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
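# Direct Logger API tests: level validation, exception(), findCaller() with
# stack_info/stacklevel, makeRecord() 'extra' handling, pickling, and the
# isEnabledFor() cache.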
class LoggerTest(BaseTest, AssertErrorMessage):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: None',
self.logger.setLevel, None)
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: (0, 0)',
self.logger.setLevel, (0, 0))
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
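# FileHandler with delay=True must not open its stream until the first record
# is emitted.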
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
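# RotatingFileHandler: rollover triggering via maxBytes, backup file naming,
# and custom namer/rotator callables (including a compressing rotator).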
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
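# TimedRotatingFileHandler: a real one-second rollover, rejection of invalid
# 'when' values, and computeRollover() for daily/weekly atTime schedules.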
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(
self.fn, 'S', encoding="utf-8", backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', encoding="utf-8", delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
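# Worked example of the expected-rollover arithmetic used above (illustrative,
# not part of the original test): if today is Thursday (wday == 3) and the
# handler rolls over on Mondays (when='W0', day == 0), then wday > day, so the
# rollover is (7 - 3 + 0) = 4 days away; converting to seconds and adding the
# 12:00 atTime gives 4*86400 + 43200 seconds past 'today'.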
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
start_all_test.py
|
import os
import time
import json5
import argparse
from threading import Thread
from job_manage import JobManager
class Config(object):
def __init__(self, rest_server_url, hdfs_url, webhdfs_url, PAI_username, PAI_password, jobs):
self.rest_server_url = rest_server_url
self.hdfs_url = hdfs_url
self.webhdfs_url = webhdfs_url
self.PAI_username = PAI_username
self.PAI_password = PAI_password
self.jobs = set(jobs.split(','))
class Submitter(object): # relate to the jobs
def __init__(self, threshold, config):
self.jobmanager = JobManager(config)
self.threshold = threshold
self.jobs = config.jobs
def runandlisten(self, config):
###
# input_type: dict
# input: the config of the job
# output_type: None
# output: None
# start the job and call rest server API to monitor the state of the job
###
try:
self.jobmanager.start(config)
except:
print("Job " + config["jobName"] + " start failed!")
else:
old = time.time()
try:
state = self.jobmanager.get_status(job_name=config["jobName"])["state"]
while (state == "RUNNING" or state == "WAITING") and time.time() - old < 60 * self.threshold:
time.sleep(20)
state = self.jobmanager.get_status(job_name=config["jobName"])["state"]
if state == "FAILED":
print(config['jobName'] + " is failed!")
else:
print(config['jobName'] + " is succeeded!")
self.jobmanager.stop(config["jobName"])
except:
print("Get state of job " + config["jobName"] + " failed!")
def submit(self, filepath):
###
# input_type: str
# input: the absolute path
# output_type: None
# output: None
# open the json file and use multi-thread to submit the job
###
try:
with open(filepath, 'r') as fp:
config = json5.load(fp)
if config["jobName"] not in self.jobs:
print("Skip job " + config["jobName"] + "!")
else:
config["jobName"] += ('_' + time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time())))
thread = Thread(target=self.runandlisten, args=(config,))
thread.start()
except:
print("The formulation of file " + filepath + " is wrong!")
class Scanner(object): # relate to the files
def __init__(self, threshold, config):
self.submitter = Submitter(threshold, config)
self.config = config
def toAbsroot(self, path):
###
# input_type: str
# input: the relative path
# output_type: str
# output: the absolute path
# change the relative path into absolute path
###
return os.path.abspath(path)
def scan(self, rootpath):
###
# input_type: str
# input: the relative path
# output_type: None
# output: None
# scan all json file within 2 steps and submit jobs according to them
###
rootpath = self.toAbsroot(rootpath)
dirs_and_files = os.listdir(rootpath)
# for parent, dirs, files in os.walk(rootpath): # search all files and dirs under rootpath
for item in dirs_and_files:
if os.path.isdir(rootpath + '/' + item):
files = os.listdir(rootpath + '/' + item)
if "prepare.sh" in files: # run the prepare shell script to download the data and code, then upload them to hdfs
os.system("/bin/bash " + rootpath + '/' + item + '/' + "prepare.sh " + self.config.hdfs_url + " " + self.config.PAI_username)
for file in files:
if file.endswith(".json"):
filepath = rootpath + '/' + item + '/' + file
print(filepath)
self.submitter.submit(filepath)
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='.')
parser.add_argument('--threshold', type=int, default=30)
parser.add_argument('--rest_server_url', type=str, default="")
parser.add_argument('--hdfs_url', type=str, default="")
parser.add_argument('--webhdfs_url', type=str, default="")
parser.add_argument('--PAI_username', type=str, default="")
parser.add_argument('--PAI_password', type=str, default="")
parser.add_argument('--jobs', type=str, default="")
args = parser.parse_args()
config = Config(rest_server_url=args.rest_server_url, hdfs_url=args.hdfs_url, webhdfs_url=args.webhdfs_url, PAI_username=args.PAI_username, PAI_password=args.PAI_password, jobs=args.jobs)
scanner = Scanner(args.threshold, config)
scanner.scan(args.path)
|
global_thread.py
|
import threading
import time
g_var=100
def add():
global g_var
for i in range(5):
g_var+=1
print('the value in add is %d'%(g_var))
def get():
global g_var
print('the value in get is %d'%(g_var))
t1 = threading.Thread(target=add)
t1.start()
time.sleep(1)
t2 = threading.Thread(target=get)
t2.start()
|
thread.py
|
import threading
x = 0
def increment():
global x
for i in range(500000):
x += 1
def main():
global x
x = 0
t1 = threading.Thread(target=increment)
t2 = threading.Thread(target=increment)
t1.start()
t2.start()
t1.join()
t2.join()
print(x)
main()
|
hummingbirdconnection.py
|
# This module implements connection of a Hummingbird controller via USB. It is used by
# hummingbird.py to send and receive commands from the Hummingbird.
# The Hummingbird is a robotics kit to promote engineering and arts education, resulting from
# the Arts & Bots research program at Carnegie Mellon's CREATE lab.
# http://www.hummingbirdkit.com
import atexit
import os
import ctypes
import threading
import time
import platform
import sys
VENDOR_ID = 0x2354
DEVICE_ID = 0x2222
HIDAPI_LIBRARY_PATH = os.environ.get('HIDAPI_LIB_PATH', './')
PING_FREQUENCY_SECONDS = 2.0 # seconds
# Detect which operating system is present and load corresponding library
system = platform.system()
if system == 'Windows':
if sys.maxsize > 2**32:
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "hidapi64.dll"))
else:
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "hidapi32.dll"))
elif system == 'Linux':
if sys.maxsize > 2**32:
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "libhidapi64.so"))
else:
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "libhidapi32.so"
))
elif system == 'Darwin':
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "libhidapi.dylib"))
else:
hid_api = ctypes.CDLL(os.path.join(HIDAPI_LIBRARY_PATH, "libhidapipi.so"))
def _inherit_docstring(cls):
def doc_setter(method):
parent = getattr(cls, method.__name__)
method.__doc__ = parent.__doc__
return method
return doc_setter
class HummingbirdConnection:
""" USB connection to the Hummingbird robot. Uses the HID API
to read and write from the controller. """
c_humm_handle = ctypes.c_void_p(None)
c_io_buffer = ctypes.c_char_p(None)
cmd_id = 0
def is_open(self):
"""Returns True if connected to the controller."""
return bool(self.c_humm_handle)
def open(self):
""" Connect to the robot.
This method looks for a USB port the Hummingbird is connected to. """
_before_new_humm_connection(self)
if self.is_open():
self.close()
try:
hid_api.hid_open.restype = ctypes.c_void_p
self.c_humm_handle = hid_api.hid_open(
ctypes.c_ushort(VENDOR_ID),
ctypes.c_ushort(DEVICE_ID),
ctypes.c_void_p(None))
self.c_io_buffer = ctypes.create_string_buffer(9)
_new_humm_connected(self)
self.cmd_id = self.read_cmd_id()
except:
raise Exception("Failed to connect to the Hummingbird robot.")
def close(self):
""" Disconnect the robot. """
if self.c_humm_handle:
self.send(b'R', [0]) # exit to idle (rest) mode
hid_api.hid_close.argtypes = [ctypes.c_void_p]
hid_api.hid_close(self.c_humm_handle)
self.c_humm_handle = ctypes.c_void_p(None)
self.c_io_buffer = ctypes.c_char_p(None)
global _open_humms
if self in _open_humms:
_open_humms.remove(self)
def read_cmd_id(self):
""" Read the controller's internal command counter. """
#self.send('z', receive = True)
self.send(b'z')
data = self.receive()
return data[0]
def send(self, command, payload=()):
"""Send a command to the controller (internal).
command: The command ASCII character
payload: a list of up to 6 bytes of additional command info
"""
if not self.is_open():
raise Exception("Connection to Hummingbird was closed.")
# Format the buffer to contain the contents of the payload.
for i in range(7):
self.c_io_buffer[i] = b'\x00'
self.c_io_buffer[1] = command[0]
python_version = sys.version_info[0]
if payload:
for i in range(len(payload)):
if python_version >= 3:
self.c_io_buffer[i+2] = payload[i]
else:
self.c_io_buffer[i+2] = bytes(chr(payload[i]))
# Make sure command id is incremented if this is a receive case
if command == b's' or command == b'G':
self.cmd_id = (self.cmd_id + 1) % 256
if python_version >= 3:
self.c_io_buffer[8] = self.cmd_id
else:
self.c_io_buffer[8] = bytes(chr(self.cmd_id))
# Write to the Hummingbird buffer
res = 0
while not res:
hid_api.hid_write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t]
res = hid_api.hid_write(self.c_humm_handle,
self.c_io_buffer,
ctypes.c_size_t(9))
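# Usage sketch (mirrors read_cmd_id above; shown here only for illustration):
# conn = HummingbirdConnection()
# conn.open()
# conn.send(b'z') # ask the controller for its command counter
# reply = conn.receive() # 9-byte response; reply[0] holds the counter
# conn.close()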
def receive(self):
""" Read the data from the Hummingbird buffer. """
res = 9
while res > 0:
hid_api.hid_read.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t]
if system == 'Darwin':
res = hid_api.hid_read(self.c_humm_handle,
self.c_io_buffer,
ctypes.c_size_t(9))
else:
res = hid_api.hid_read_timeout(self.c_humm_handle,
self.c_io_buffer,
ctypes.c_size_t(9),
50)
if self.cmd_id == ord(self.c_io_buffer[8]):
break
return [ord(self.c_io_buffer[i]) for i in range(9)]
class ThreadedHummConnection(HummingbirdConnection):
"""Threaded implementation of Hummingbird Connection"""
lock = None
thread = None
main_thread = None
last_cmd_sent = time.time()
@_inherit_docstring(HummingbirdConnection)
def open(self):
HummingbirdConnection.open(self)
if not self.is_open():
return
self.lock = threading.Lock()
self.thread = threading.Thread(target=self.__class__._pinger, args=(self, ))
self.main_thread = threading.current_thread()
self.thread.start()
@_inherit_docstring(HummingbirdConnection)
def send(self, command, payload=(), receive=False):
try:
if self.lock is not None:
self.lock.acquire()
HummingbirdConnection.send(self, command, payload=payload)
self.last_cmd_sent = time.time()
finally:
if self.lock is not None:
self.lock.release()
@_inherit_docstring(HummingbirdConnection)
def receive(self):
try:
if self.lock is not None:
self.lock.acquire()
data = HummingbirdConnection.receive(self)
finally:
if self.lock is not None:
self.lock.release()
return data
def _pinger(self):
""" Sends keep-alive commands every few secconds of inactivity. """
while True:
if not self.lock:
break
if not self.c_humm_handle:
break
if not self.main_thread.is_alive():
break
try:
self.lock.acquire()
now = time.time()
if self.last_cmd_sent:
delta = now - self.last_cmd_sent
else:
delta = PING_FREQUENCY_SECONDS
if delta >= PING_FREQUENCY_SECONDS:
HummingbirdConnection.send(self, b'z')
HummingbirdConnection.receive(self)
self.last_cmd_sent = now
finally:
self.lock.release()
time.sleep(0.1)
@_inherit_docstring(HummingbirdConnection)
def close(self):
HummingbirdConnection.close(self)
self.thread.join()
self.lock = None
self.thread = None
# Functions that handle the list of open hummingbirds
_open_humms = []
def _before_new_humm_connection(humm):
global _open_humms
# close other connections
for robot in _open_humms:
if robot.is_open():
robot.close()
def _new_humm_connected(humm):
global _open_humms
if humm not in _open_humms:
_open_humms.append(humm)
def _close_all_humms():
global _open_humms
if not _open_humms:
return
for humm in _open_humms:
if humm.is_open():
humm.close()
atexit.register(_close_all_humms)
|
test_summary.py
|
import json
from mock import MagicMock
from mock import patch
from multiprocessing import Process
import os
import unittest
import warnings
import filelock
import numpy as np
import pytest
from chainerui import summary
try:
import chainer # NOQA
_chainer_installed = True
except (ImportError, TypeError):
_chainer_installed = False
if _chainer_installed:
from chainerui.report import audio_report
_audio_report_available = audio_report._available
from chainerui.report import image_report
_image_report_available = image_report._available
else:
_audio_report_available = False
_image_report_available = False
@pytest.fixture(autouse=True, scope='function')
def clear_cache():
yield
summary._chainerui_asset_observer.out = None
summary._chainerui_asset_observer.cache = []
summary._chainerui_asset_observer.saved_idx = 0
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_summary_image_without_output_path(func_dir):
summary._chainerui_asset_observer.default_output_path = func_dir
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
summary.image(img)
assert len(w) == 1
assert os.path.exists(meta_filepath)
summary.set_out(func_dir)
summary.image(img)
assert len(w) == 1
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_summary_reporter_image_without_output_path(func_dir):
summary._chainerui_asset_observer.default_output_path = func_dir
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
with summary.reporter() as r:
r.image(img)
assert len(w) == 1
assert os.path.exists(meta_filepath)
summary.set_out(func_dir)
with summary.reporter() as r:
r.image(img)
assert len(w) == 1
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_summary_image(func_dir):
img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
summary.image(img, out=func_dir, epoch=10)
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
assert os.path.exists(meta_filepath)
with open(meta_filepath, 'r') as f:
metas = json.load(f)
assert len(metas) == 1
assert 'timestamp' in metas[0]
assert 'epoch' in metas[0]
assert metas[0]['epoch'] == 10
assert 'images' in metas[0]
assert 'image' in metas[0]['images']
saved_filename = metas[0]['images']['image']
assert saved_filename.startswith('image_')
assert saved_filename.endswith('.png')
img2 = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
summary.image(img2, 'test', out=func_dir, subdir='image', epoch=20)
with open(meta_filepath, 'r') as f:
metas2 = json.load(f)
assert len(metas2) == 2
assert 'timestamp' in metas2[1]
assert 'epoch' in metas2[1]
assert metas2[1]['epoch'] == 20
assert 'images' in metas2[1]
assert 'test' in metas2[1]['images']
saved_filename2 = metas2[1]['images']['test']
assert saved_filename != saved_filename2
assert saved_filename2.startswith(os.path.join('image', 'test_'))
assert saved_filename2.endswith('.png')
@unittest.skipUnless(_audio_report_available, 'Audio report is not available')
def test_summary_audio(func_dir):
audio = np.random.uniform(-1, 1, 16000)
summary.audio(audio, 16000, out=func_dir, epoch=10)
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
assert os.path.exists(meta_filepath)
with open(meta_filepath, 'r') as f:
metas = json.load(f)
assert len(metas) == 1
assert 'timestamp' in metas[0]
assert 'epoch' in metas[0]
assert metas[0]['epoch'] == 10
assert 'audios' in metas[0]
assert 'audio' in metas[0]['audios']
saved_filename = metas[0]['audios']['audio']
assert saved_filename.startswith('audio_')
assert saved_filename.endswith('.wav')
summary.audio(audio, 16000, out=func_dir, subdir='audio', epoch=20)
with open(meta_filepath, 'r') as f:
metas2 = json.load(f)
assert len(metas2) == 2
assert 'timestamp' in metas2[1]
assert 'epoch' in metas2[1]
assert metas2[1]['epoch'] == 20
assert 'audios' in metas2[1]
assert 'audio' in metas2[1]['audios']
saved_filename = metas2[1]['audios']['audio']
assert saved_filename.startswith(os.path.join('audio', 'audio_'))
assert saved_filename.endswith('.wav')
def test_summary_text(func_dir):
summary.text('content', out=func_dir, epoch=10)
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
assert os.path.exists(meta_filepath)
with open(meta_filepath, 'r') as f:
metas = json.load(f)
assert len(metas) == 1
assert 'timestamp' in metas[0]
assert metas[0].get('epoch', None) == 10
assert 'texts' in metas[0]
assert metas[0]['texts'].get('text', None) == 'content'
summary.text('content2', 'text2', out=func_dir, epoch=20)
with open(meta_filepath, 'r') as f:
metas2 = json.load(f)
assert len(metas2) == 2
assert 'timestamp' in metas2[1]
assert metas2[1].get('epoch', None) == 20
assert 'texts' in metas2[1]
assert metas2[1]['texts'].get('text2', None) == 'content2'
@unittest.skipUnless(_image_report_available, 'Image report is not available')
@unittest.skipUnless(_audio_report_available, 'Audio report is not available')
def test_summary_reporter_mix(func_dir):
img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
img2 = np.copy(img)
audio = np.random.uniform(-1, 1, 16000)
audio2 = np.copy(audio)
with summary.reporter(prefix='with_', out=func_dir, epoch=10) as r:
r.image(img)
r.image(img2, 'test_image', subdir='image')
r.audio(audio, 16000)
r.audio(audio2, 16000, 'test_audio', subdir='audio')
r.text('content')
r.text('content2', 'test_text')
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
assert os.path.exists(meta_filepath)
with open(meta_filepath, 'r') as f:
metas = json.load(f)
assert len(metas) == 1
assert 'timestamp' in metas[0]
assert 'epoch' in metas[0]
assert metas[0]['epoch'] == 10
assert 'images' in metas[0]
assert 'with_image_0' in metas[0]['images']
saved_filename = metas[0]['images']['with_image_0']
assert saved_filename.startswith('with_image_0')
assert saved_filename.endswith('.png')
assert 'with_test_image' in metas[0]['images']
saved_filename1 = metas[0]['images']['with_test_image']
assert saved_filename1.startswith(os.path.join('image', 'with_test_image'))
assert saved_filename1.endswith('.png')
assert 'with_audio_2' in metas[0]['audios']
saved_filename3 = metas[0]['audios']['with_audio_2']
assert saved_filename3.startswith('with_audio_2')
assert saved_filename3.endswith('.wav')
assert 'with_test_audio' in metas[0]['audios']
saved_filename4 = metas[0]['audios']['with_test_audio']
assert saved_filename4.startswith(os.path.join('audio', 'with_test_audio'))
assert saved_filename4.endswith('.wav')
assert 'texts' in metas[0]
assert metas[0]['texts'].get('with_text_4', None) == 'content'
assert metas[0]['texts'].get('with_test_text', None) == 'content2'
img3 = np.copy(img)
img4 = np.copy(img)
audio3 = np.copy(audio)
audio4 = np.copy(audio)
with summary.reporter(
prefix='with_', out=func_dir, subdir='sub', epoch=20) as r:
r.image(img3)
r.image(img4, 'test_image', subdir='image')
r.audio(audio3, 44100)
r.audio(audio4, 44100, 'test_audio', subdir='audio')
r.text('content')
r.text('content2', 'test_text')
with open(meta_filepath, 'r') as f:
metas2 = json.load(f)
assert len(metas2) == 2
assert 'timestamp' in metas2[1]
assert 'epoch' in metas2[1]
assert metas2[1]['epoch'] == 20
assert 'images' in metas2[1]
assert 'with_image_0' in metas2[1]['images']
saved_filename = metas2[1]['images']['with_image_0']
assert saved_filename.startswith(os.path.join('sub', 'with_image_0'))
assert saved_filename.endswith('.png')
assert 'with_test_image' in metas2[1]['images']
saved_filename1 = metas2[1]['images']['with_test_image']
assert saved_filename1.startswith(
os.path.join('sub', 'image', 'with_test_image'))
assert saved_filename1.endswith('.png')
assert 'with_audio_2' in metas2[1]['audios']
saved_filename2 = metas2[1]['audios']['with_audio_2']
assert saved_filename2.startswith(os.path.join('sub', 'with_audio_2'))
assert saved_filename2.endswith('.wav')
assert 'with_test_audio' in metas2[1]['audios']
saved_filename3 = metas2[1]['audios']['with_test_audio']
assert saved_filename3.startswith(os.path.join(
'sub', 'audio', 'with_test_audio'))
assert saved_filename3.endswith('.wav')
assert 'texts' in metas2[1]
assert metas2[1]['texts'].get('with_text_4', None) == 'content'
assert metas2[1]['texts'].get('with_test_text', None) == 'content2'
def test_summary_reporter_empty(func_dir):
with summary.reporter(out=func_dir, epoch=10):
pass
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
assert not os.path.exists(meta_filepath)
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_summary_image_unavailable(func_dir):
mock_checker = MagicMock(return_value=False)
with patch('chainerui.report.image_report.check_available', mock_checker):
summary.image(np.zeros(10), out=func_dir)
assert not os.path.exists(
os.path.join(func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME))
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_summary_audio_unavailable(func_dir):
mock_checker = MagicMock(return_value=False)
with patch('chainerui.report.audio_report.check_available', mock_checker):
summary.audio(np.zeros(10), 40, out=func_dir)
assert not os.path.exists(
os.path.join(func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME))
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_reporter_image_unavailable(func_dir):
mock_checker = MagicMock(return_value=False)
with patch('chainerui.report.image_report.check_available', mock_checker):
with summary.reporter(out=func_dir) as r:
r.image(np.zeros(10))
assert not os.path.exists(
os.path.join(func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME))
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_reporter_audio_unavailable(func_dir):
mock_checker = MagicMock(return_value=False)
with patch('chainerui.report.audio_report.check_available', mock_checker):
with summary.reporter(out=func_dir) as r:
r.audio(np.zeros(10), 40)
assert not os.path.exists(
os.path.join(func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME))
def test_summary_called_multiple_script(func_dir):
# This test alone cannot fully verify that the _Summary object handles being
# called from multiple scripts, because that is difficult to test directly.
# Instead, by holding the meta file's lock during this test, we simulate
# another script sharing the meta file, and then check that the asset list
# is appended correctly once the lock is released.
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
metalock_filepath = meta_filepath + '.lock'
summary.text('test', out=func_dir, epoch=10)
assert os.path.exists(meta_filepath)
try:
p = Process(
target=summary.text, args=('test2',),
kwargs={'out': func_dir, 'epoch': 20})
with filelock.FileLock(metalock_filepath):
# virtually other script handles the meta file
# save a next image, expected to write after lock file is released
p.start()
# set dummy text, this process means added asset by other process
with open(meta_filepath, 'r') as f:
saved = json.load(f)
assert len(saved) == 1
with open(meta_filepath, 'w') as f:
saved.append({'dummy': 'text'})
json.dump(saved, f, indent=4)
finally:
p.join()
with open(meta_filepath) as f:
saved = json.load(f)
assert len(saved) == 3
assert saved[1].get('dummy', None) == 'text'
assert saved[2].get('epoch', None) == 20
def test_summary_timeout(func_dir, caplog):
meta_filepath = os.path.join(
func_dir, summary.CHAINERUI_ASSETS_METAFILE_NAME)
metalock_filepath = meta_filepath + '.lock'
with filelock.FileLock(metalock_filepath):
with summary.reporter(out=func_dir, timeout=0.1) as r:
# test process has already handled meta file,
# this saving process should be timeout
r.text('test')
assert len(caplog.records) == 1
assert 'is timeout' in caplog.records[0].message
assert not os.path.exists(meta_filepath)
|
nifty.py
|
"""@package geometric.nifty Nifty functions, originally intended to be imported by any module within ForceBalance.
This file was copied over from ForceBalance to geomeTRIC in order to lighten the dependencies of the latter.
Table of Contents:
- I/O formatting
- Math: Variable manipulation, linear algebra, least squares polynomial fitting
- Pickle: Expand Python's own pickle to accommodate writing XML etree objects
- Commands for submitting things to the Work Queue
- Various file and process management functions
- Development stuff (not commonly used)
Named after the mighty Sniffy Handy Nifty (King Sniffy)
@author Lee-Ping Wang
@date 2018-03-10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import itertools
import os
import re
import shutil
import sys
from select import select
import numpy as np
from numpy.linalg import multi_dot
# For Python 3 compatibility
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import threading
from pickle import Pickler, Unpickler
import tarfile
import time
import subprocess
import math
import six # For six.string_types
from subprocess import PIPE
from collections import OrderedDict, defaultdict
#import pybel as pb
#import openbabel as ob
#local
from . import elements
from contextlib import contextmanager
import io
@contextmanager
def custom_redirection(fileobj):
old = sys.stdout
sys.stdout = fileobj
try:
yield fileobj
finally:
sys.stdout = old
logging_initialized = False
#================================#
# Set up the logger #
#================================#
if not logging_initialized:
if "forcebalance" in __name__:
# If this module is part of ForceBalance, use the package level logger
from .output import *
elif "geometric" in __name__:
# This ensures logging behavior is consistent with the rest of geomeTRIC
from logging import *
logger = getLogger(__name__)
logger.setLevel(INFO)
else:
# Previous default behavior if FB package level loggers could not be imported
from logging import *
class RawStreamHandler(StreamHandler):
"""Exactly like output.StreamHandler except it does no extra formatting
before sending logging messages to the stream. This is more compatible with
how output has been displayed in ForceBalance. Default stream has also been
changed from stderr to stdout"""
def __init__(self, stream=sys.stdout):
super(RawStreamHandler, self).__init__(stream)
def emit(self, record):
message = record.getMessage()
self.stream.write(message)
self.flush()
# logger=getLogger()
# logger.handlers = [RawStreamHandler(sys.stdout)]
# LPW: Daniel Smith suggested these changes to improve logger behavior
logger = getLogger("NiftyLogger")
logger.setLevel(INFO)
if not logger.handlers:
handler = RawStreamHandler()
logger.addHandler(handler)
logging_initialized = True
try:
import bz2
HaveBZ2 = True
except ImportError:
logger.warning(
"bz2 module import failed (used in compressing or decompressing pickle files)\n")
HaveBZ2 = False
try:
import gzip
HaveGZ = True
except ImportError:
logger.warning(
"gzip module import failed (used in compressing or decompressing pickle files)\n")
HaveGZ = False
#=========================#
# I/O formatting #
#=========================#
# These functions may be useful someday but I have not tested them
# def bzip2(src):
# dest = src+'.bz2'
# if not os.path.exists(src):
# logger.error('File to be compressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Archive to be created already exists')
# raise RuntimeError
# with open(src, 'rb') as input:
# with bz2.BZ2File(dest, 'wb', compresslevel=9) as output:
# copyfileobj(input, output)
# os.remove(input)
# def bunzip2(src):
# dest = re.sub('\.bz2$', '', src)
# if not os.path.exists(src):
# logger.error('File to be decompressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Target path for decompression already exists')
# raise RuntimeError
# with bz2.BZ2File(src, 'rb', compresslevel=9) as input:
# with open(dest, 'wb') as output:
# copyfileobj(input, output)
# os.remove(input)
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO):
"""Printout of a 1-D vector.
@param[in] vec1d a 1-D vector
"""
v2a = np.array(vec1d)
for i in range(v2a.shape[0]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % v2a[i])
logger.log(loglevel, '\n')
def astr(vec1d, precision=4):
""" Write an array to a string so we can use it to key a dictionary. """
return ' '.join([("%% .%ie " % precision % i) for i in vec1d])
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO):
"""Printout of a 2-D array.
@param[in] mat2d a 2-D array
"""
m2a = np.array(mat2d)
for i in range(m2a.shape[0]):
for j in range(m2a.shape[1]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % m2a[i][j])
logger.log(loglevel, '\n')
def grouper(iterable, n):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3) --> [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
args = [iter(iterable)] * n
lzip = [[j for j in i if j is not None] for i in list(zip_longest(*args))]
return lzip
def encode(l):
return [[len(list(group)), name] for name, group in itertools.groupby(l)]
def segments(e):
# Takes encoded input.
begins = np.array([sum([k[0] for k in e][:j])
for j, i in enumerate(e) if i[1] == 1])
lens = np.array([i[0] for i in e if i[1] == 1])
return [(i, i+j) for i, j in zip(begins, lens)]
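# Illustrative example (added for clarity, not from the original source):
# encode([1, 1, 0, 1]) gives [[2, 1], [1, 0], [1, 1]] (run lengths and values),
# and feeding that to segments() yields [(0, 2), (3, 4)], i.e. the half-open
# index ranges where the value 1 occurs.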
def commadash(l):
# Formats a list like [27, 28, 29, 30, 31, 88, 89, 90, 91, 100, 136, 137, 138, 139]
# into '27-31,88-91,100,136-139'
L = sorted(l)
if len(L) == 0:
return "(empty)"
L.append(L[-1]+1)
LL = [i in L for i in range(L[-1])]
return ','.join('%i-%i' % (i[0]+1, i[1]) if (i[1]-1 > i[0]) else '%i' % (i[0]+1) for i in segments(encode(LL)))
def uncommadash(s):
# Takes a string like '27-31,88-91,100,136-139'
# and turns it into a list like [26, 27, 28, 29, 30, 87, 88, 89, 90, 99, 135, 136, 137, 138]
L = []
try:
for w in s.split(','):
ws = w.split('-')
a = int(ws[0])-1
if len(ws) == 1:
b = int(ws[0])
elif len(ws) == 2:
b = int(ws[1])
else:
logger.warning("Dash-separated list cannot exceed length 2\n")
raise
if a < 0 or b <= 0 or b <= a:
if a < 0 or b <= 0:
logger.warning(
"Items in list cannot be zero or negative: %d %d\n" % (a, b))
else:
logger.warning(
"Second number cannot be smaller than first: %d %d\n" % (a, b))
raise
newL = list(range(a, b))
if any([i in L for i in newL]):
logger.warning("Duplicate entries found in list\n")
raise
L += newL
if sorted(L) != L:
logger.warning("List is out of order\n")
raise
except:
logger.error(
'Invalid string for converting to list of numbers: %s\n' % s)
raise RuntimeError
return L
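# Quick round-trip sketch (illustrative values only):
# >>> commadash([0, 1, 2, 5])
# '1-3,6'
# >>> uncommadash('1-3,6')
# [0, 1, 2, 5]
# Note that the printed ranges are 1-based while the Python lists are 0-based,
# matching the comments above.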
def natural_sort(l):
""" Return a natural sorted list. """
# Convert a character to a digit or a lowercase character
def convert(text): return int(text) if text.isdigit() else text.lower()
# Split string into "integer" and "noninteger" fields and convert each one
def alphanum_key(key): return [convert(c)
for c in re.split('([0-9]+)', key)]
# Sort strings using these keys in descending order of importance, I guess.
return sorted(l, key=alphanum_key)
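# Example (illustrative): natural_sort(['frame10', 'frame2', 'frame1'])
# returns ['frame1', 'frame2', 'frame10'], whereas plain sorted() would put
# 'frame10' before 'frame2'.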
def printcool(text, sym="#", bold=False, color=2, ansi=None, bottom='-', minwidth=50, center=True, sym2="="):
"""Cool-looking printout for slick formatting of output.
@param[in] text The string that the printout is based upon. This function
will print out the string, ANSI-colored and enclosed in the symbol
for example:\n
<tt> ################# </tt>\n
<tt> ### I am cool ### </tt>\n
<tt> ################# </tt>
@param[in] sym The surrounding symbol\n
@param[in] bold Whether to use bold print
@param[in] color The ANSI color:\n
1 red\n
2 green\n
3 yellow\n
4 blue\n
5 magenta\n
6 cyan\n
7 white
@param[in] bottom The symbol for the bottom bar
@param[in] minwidth The minimum width for the box, if the text is very short
then we insert the appropriate number of padding spaces
@return bar The bottom bar is returned for the user to print later, e.g. to mark off a 'section'
"""
def newlen(l):
return len(re.sub(r"\x1b\[[0-9;]*m", "", l))
text = text.split('\n')
width = max(minwidth, max([newlen(line) for line in text]))
bar = ''.join([sym2 for i in range(width + 6)])
bar = sym + bar + sym
#bar = ''.join([sym for i in range(width + 8)])
logger.info('\r'+bar + '\n')
for ln, line in enumerate(text):
if type(center) is list:
c1 = center[ln]
else:
c1 = center
if c1:
padleft = ' ' * (int((width - newlen(line))/2))
else:
padleft = ''
padright = ' ' * (width - newlen(line) - len(padleft))
if ansi is not None:
ansi = str(ansi)
logger.info("%s| \x1b[%sm%s " % (
sym, ansi, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
elif color is not None:
if color == 0 and bold:
logger.info("%s| \x1b[1m%s " % (
sym, padleft) + line + " %s\x1b[0m |%s\n" % (padright, sym))
elif color == 0:
logger.info("%s| %s " % (sym, padleft)+line +
" %s |%s\n" % (padright, sym))
else:
logger.info("%s| \x1b[%s9%im%s " % (
sym, bold and "1;" or "", color, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
# if color == 3 or color == 7:
# print "%s\x1b[40m\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
# else:
# print "%s\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
else:
warn_press_key("Inappropriate use of printcool")
logger.info(bar + '\n')
botbar = ''.join([bottom for i in range(width + 8)])
return botbar + '\n'
def printcool_dictionary(Dict, title="Dictionary Keys : Values", bold=False, color=2, keywidth=25, topwidth=50, center=True, leftpad=0):
"""See documentation for printcool; this is a nice way to print out keys/values in a dictionary.
The keys in the dictionary are sorted before printing out.
@param[in] dict The dictionary to be printed
@param[in] title The title of the printout
"""
if Dict is None:
return
bar = printcool(title, bold=bold, color=color,
minwidth=topwidth, center=center)
def magic_string(str):
# This cryptic command returns a string with the number of characters specified as a variable. :P
# Useful for printing nice-looking dictionaries, i guess.
# print "\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"'))
return eval("\'%%-%is\' %% '%s'" % (keywidth, str.replace("'", "\\'").replace('"', '\\"')))
if isinstance(Dict, OrderedDict):
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),
str(Dict[key])) for key in Dict if Dict[key] is not None]))
else:
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)), str(Dict[key]))
for key in sorted([i for i in Dict]) if Dict[key] is not None]))
logger.info("\n%s" % bar)
#===============================#
#| Math: Variable manipulation |#
#===============================#
def isint(word):
"""ONLY matches integers! If you have a decimal point? None shall pass!
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
"""
try:
word = str(word)
except:
return False
return bool(re.match('^[-+]?[0-9]+$', word))
def isfloat(word):
"""Matches ANY number; it can be a decimal, scientific notation, what have you
CAUTION - this will also match an integer.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is any number
"""
try:
word = str(word)
except:
return False
if len(word) == 0:
return False
return bool(re.match(r'^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$', word))
def isdecimal(word):
"""Matches things with a decimal only; see isint and isfloat.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is a number with a decimal point
"""
try:
word = str(word)
except:
return False
return isfloat(word) and not isint(word)
def floatornan(word):
"""Returns a big number if we encounter NaN.
@param[in] word The string to be converted
@return answer The string converted to a float; if not a float, return 1e10
@todo I could use suggestions for making this better.
"""
big = 1e10
if isfloat(word):
return float(word)
else:
logger.info("Setting %s to % .1e\n" % big)
return big
def col(vec):
"""
Given any list, array, or matrix, return a 1-column 2D array.
Input:
vec = The input vector that is to be made into a column
Output:
A 1-column 2D array
"""
return np.array(vec).reshape(-1, 1)
def row(vec):
"""Given any list, array, or matrix, return a 1-row 2D array.
@param[in] vec The input vector that is to be made into a row
@return answer A 1-row 2D array
"""
return np.array(vec).reshape(1, -1)
def flat(vec):
"""Given any list, array, or matrix, return a single-index array.
@param[in] vec The data to be flattened
@return answer The flattened data
"""
return np.array(vec).reshape(-1)
def est124(val):
"""Given any positive floating point value, return a value [124]e+xx
that is closest to it in the log space.
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log4 = 0.6020599913279624
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log4):
fac = 2.0
elif logfrac < 0.5*(log4+log10):
fac = 4.0
else:
fac = 10.0
return fac*10**logint
def est1234568(val):
"""Given any positive floating point value, return a value [1234568]e+xx
that is closest to it in the log space. Just because I don't like seven
and nine. Call me a numberist?
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log3 = np.log10(3)
log4 = 0.6020599913279624
log5 = np.log10(5)
log6 = np.log10(6)
log8 = np.log10(8)
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log3):
fac = 2.0
elif logfrac < 0.5*(log3+log4):
fac = 3.0
elif logfrac < 0.5*(log4+log5):
fac = 4.0
elif logfrac < 0.5*(log5+log6):
fac = 5.0
elif logfrac < 0.5*(log6+log8):
fac = 6.0
elif logfrac < 0.5*(log8+log10):
fac = 8.0
else:
fac = 10.0
return fac*10**logint
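# Log-space rounding examples (illustrative, computed by hand):
# est124(3.0) -> 4.0 (log10(3) ~ 0.48 is closer to log10(4) than to log10(2))
# est124(15.0) -> 20.0
# est1234568(6.7) -> 6.0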
def monotonic(arr, start, end):
# Make sure an array is monotonically decreasing from the start to the end.
a0 = arr[start]
i0 = start
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
arr[i0:i+1] = np.linspace(a0, arr[i], i-i0+1)
a0 = arr[i]
i0 = i
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
arr[i:i0+1] = np.linspace(arr[i], a0, i0-i+1)
a0 = arr[i]
i0 = i
i -= 1
def monotonic_decreasing(arr, start=None, end=None, verbose=False):
"""
Return the indices of an array corresponding to strictly monotonic
decreasing behavior.
Parameters
----------
arr : numpy.ndarray
Input array
start : int
Starting index (first element if None)
end : int
Ending index (last element if None)
Returns
-------
indices : numpy.ndarray
Selected indices
"""
if start is None:
start = 0
if end is None:
end = len(arr) - 1
a0 = arr[start]
idx = [start]
if verbose:
logger.info("Starting @ %i : %.6f" % (start, arr[start]))
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose:
logger.info("Including %i : %.6f" % (i, arr[i]))
else:
if verbose:
logger.info("Excluding %i : %.6f" % (i, arr[i]))
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose:
logger.info("Including %i : %.6f" % (i, arr[i]))
else:
if verbose:
logger.info("Excluding %i : %.6f" % (i, arr[i]))
i -= 1
return np.array(idx)
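# Illustrative call (values made up): monotonic_decreasing([3.0, 2.5, 2.7, 1.0, 0.5])
# returns array([0, 1, 3]) -- index 2 is skipped because 2.7 breaks the
# strictly decreasing sequence; note that the element at the end index itself
# (here the last element) is not examined by the scan.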
#====================================#
#| Math: Vectors and linear algebra |#
#====================================#
def orthogonalize(vec1, vec2):
"""Given two vectors vec1 and vec2, project out the component of vec1
that is along the vec2-direction.
@param[in] vec1 The projectee (i.e. output is some modified version of vec1)
@param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
@return answer A copy of vec1 but with the vec2-component projected out.
"""
v2u = vec2/np.linalg.norm(vec2)
return vec1 - v2u*np.dot(vec1, v2u)
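# Small sanity-check example (illustrative): orthogonalize([1.0, 1.0, 0.0],
# [0.0, 2.0, 0.0]) returns array([1., 0., 0.]) -- the component of vec1 along
# vec2 (the y-direction) has been projected out.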
def invert_svd(X, thresh=1e-12):
"""
Invert a matrix using singular value decomposition.
@param[in] X The 2-D NumPy array containing the matrix to be inverted
@param[in] thresh The SVD threshold; singular values below this are not inverted but set to zero
@return Xt The 2-D NumPy array containing the inverted matrix
"""
u, s, vh = np.linalg.svd(X, full_matrices=0)
uh = np.transpose(u)
v = np.transpose(vh)
si = s.copy()
for i in range(s.shape[0]):
if abs(s[i]) > thresh:
si[i] = 1./s[i]
else:
si[i] = 0.0
si = np.diag(si)
Xt = multi_dot([v, si, uh])
return Xt
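# Usage sketch (illustrative): for a well-conditioned matrix this reproduces
# the ordinary inverse, while near-singular directions are zeroed out:
# A = np.array([[2.0, 0.0], [0.0, 1e-15]])
# invert_svd(A) # ~ [[0.5, 0], [0, 0]]; the tiny singular value falls
# # below thresh=1e-12 and is not inverted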
#==============================#
#| Linear least squares |#
#==============================#
def get_least_squares(x, y, w=None, thresh=1e-12):
"""
@code
__ __
| |
| 1 (x0) (x0)^2 (x0)^3 |
| 1 (x1) (x1)^2 (x1)^3 |
| 1 (x2) (x2)^2 (x2)^3 |
| 1 (x3) (x3)^2 (x3)^3 |
| 1 (x4) (x4)^2 (x4)^3 |
|__ __|
@endcode
@param[in] X (2-D array) An array of X-values (see above)
@param[in] Y (array) An array of Y-values (only used in getting the least squares coefficients)
@param[in] w (array) An array of weights, hopefully normalized to one.
@param[out] Beta The least-squares coefficients
@param[out] Hat The hat matrix that takes linear combinations of data y-values to give fitted y-values (weights)
@param[out] yfit The fitted y-values
@param[out] MPPI The Moore-Penrose pseudoinverse (multiply by Y to get least-squares coefficients, multiply by dY/dk to get derivatives of least-squares coefficients)
"""
# X is a 'tall' matrix.
X = np.array(x)
if len(X.shape) == 1:
X = X[:, np.newaxis]
Y = col(y)
n_x = X.shape[0]
n_fit = X.shape[1]
if n_fit > n_x:
logger.warning(
"Argh? It seems like this problem is underdetermined!\n")
# Build the weight matrix.
if w is not None:
if len(w) != n_x:
warn_press_key(
"The weight array length (%i) must be the same as the number of 'X' data points (%i)!" % len(w), n_x)
w /= np.mean(w)
WH = np.diag(w**0.5)
else:
WH = np.eye(n_x)
# Make the Moore-Penrose Pseudoinverse.
# if n_fit == n_x:
# MPPI = np.linalg.inv(WH*X)
# else:
# This resembles the formula (X'WX)^-1 X' W^1/2
MPPI = np.linalg.pinv(np.dot(WH, X))
Beta = multi_dot([MPPI, WH, Y])
Hat = multi_dot([WH, X, MPPI])
yfit = flat(np.dot(Hat, Y))
# Return three things: the least-squares coefficients, the hat matrix (turns y into yfit), and yfit
# We could get these all from MPPI, but I might get confused later on, so might as well do it here :P
return np.array(Beta).flatten(), np.array(Hat), np.array(yfit).flatten(), np.array(MPPI)
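# Minimal usage sketch (illustrative values, fitting y = 1 + 2x):
# X = np.array([[1, 0], [1, 1], [1, 2]]) # columns: constant term, x
# Y = [1.0, 3.0, 5.0]
# Beta, Hat, yfit, MPPI = get_least_squares(X, Y)
# Beta # ~ array([1., 2.])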
#===========================================#
#| John's statisticalInefficiency function |#
#===========================================#
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
"""
Compute the (cross) statistical inefficiency of (two) timeseries.
Notes
The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
The fast method described in Ref [1] is used to compute g.
References
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
Examples
Compute statistical inefficiency of timeseries data with known correlation time.
>>> import timeseries
>>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
>>> g = statisticalInefficiency(A_n, fast=True)
@param[in] A_n (required, numpy array) - A_n[n] is nth value of
timeseries A. Length is deduced from vector.
@param[in] B_n (optional, numpy array) - B_n[n] is nth value of
timeseries B. Length is deduced from vector. If supplied, the
cross-correlation of timeseries A and B will be estimated instead of
the autocorrelation of timeseries A.
@param[in] fast (optional, boolean) - if True, will use faster (but
less accurate) method to estimate correlation time, described in
Ref. [1] (default: False)
@param[in] mintime (optional, int) - minimum amount of correlation
function to compute (default: 3) The algorithm terminates after
computing the correlation time out to mintime when the correlation
function first goes negative. Note that this time may need to be
increased if there is a strong initial negative peak in the
correlation function.
@return g The estimated statistical inefficiency (equal to 1 + 2
tau, where tau is the correlation time). We enforce g >= 1.0.
"""
# Create numpy copies of input arguments.
A_n = np.array(A_n)
if B_n is not None:
B_n = np.array(B_n)
else:
B_n = np.array(A_n)
# Get the length of the timeseries.
N = A_n.shape[0]
# Be sure A_n and B_n have the same dimensions.
if A_n.shape != B_n.shape:
logger.error('A_n and B_n must have same dimensions.\n')
raise ParameterError
# Initialize statistical inefficiency estimate with uncorrelated value.
g = 1.0
# Compute mean of each timeseries.
mu_A = A_n.mean()
mu_B = B_n.mean()
# Make temporary copies of fluctuation from mean.
dA_n = A_n.astype(np.float64) - mu_A
dB_n = B_n.astype(np.float64) - mu_B
# Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
# Trap the case where this covariance is zero, and we cannot proceed.
if sigma2_AB == 0:
if warn:
logger.warning(
'Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency\n')
return 1.0
# Accumulate the integrated correlation time by computing the normalized correlation time at
# increasing values of t. Stop accumulating if the correlation function goes negative, since
# this is unlikely to occur unless the correlation function has decayed to the point where it
# is dominated by noise and indistinguishable from zero.
t = 1
increment = 1
while t < N-1:
# compute normalized fluctuation correlation function at time t
C = sum(dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)] *
dA_n[t:N]) / (2.0 * float(N-t) * sigma2_AB)
# Terminate if the correlation function has crossed zero and we've computed the correlation
# function at least out to 'mintime'.
if (C <= 0.0) and (t > mintime):
break
# Accumulate contribution to the statistical inefficiency.
g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
# Increment t and the amount by which we increment t.
t += increment
# Increase the interval if "fast mode" is on.
if fast:
increment += 1
# g must be at least unity
if g < 1.0:
g = 1.0
# Return the computed statistical inefficiency.
return g
def mean_stderr(ts):
"""Return mean and standard deviation of a time series ts."""
return np.mean(ts), \
np.std(ts)*np.sqrt(statisticalInefficiency(ts, warn=False)/len(ts))
# Slices a 2D array of data by column. The new array is fed into the statisticalInefficiency function.
def multiD_statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
n_row = A_n.shape[0]
n_col = A_n.shape[-1]
multiD_sI = np.zeros((n_row, n_col))
for col in range(n_col):
if B_n is None:
multiD_sI[:, col] = statisticalInefficiency(
A_n[:, col], B_n, fast, mintime, warn)
else:
multiD_sI[:, col] = statisticalInefficiency(
A_n[:, col], B_n[:, col], fast, mintime, warn)
return multiD_sI
#========================================#
#| Loading compressed pickles |#
#========================================#
def lp_dump(obj, fnm, protocol=0):
""" Write an object to a zipped pickle file specified by the path. """
# Safeguard against overwriting files? Nah.
# if os.path.exists(fnm):
# logger.error("lp_dump cannot write to an existing path")
# raise IOError
if os.path.islink(fnm):
logger.warning(
"Trying to write to a symbolic link %s, removing it first\n" % fnm)
os.unlink(fnm)
if HaveGZ:
f = gzip.GzipFile(fnm, 'wb')
elif HaveBZ2:
f = bz2.BZ2File(fnm, 'wb')
else:
f = open(fnm, 'wb')
Pickler(f, protocol).dump(obj)
f.close()
def lp_load(fnm):
""" Read an object from a bzipped file specified by the path. """
if not os.path.exists(fnm):
logger.error(
"lp_load cannot read from a path that doesn't exist (%s)" % fnm)
raise IOError
def load_uncompress():
logger.warning(
"Compressed file loader failed, attempting to read as uncompressed file\n")
f = open(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_bz2():
f = bz2.BZ2File(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_gz():
f = gzip.GzipFile(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
if HaveGZ:
try:
answer = load_gz()
except:
if HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
elif HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
return answer
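# Usage sketch (illustrative file name): write and read back a compressed pickle.
# lp_dump({'energy': -76.4}, 'checkpoint.p') # gzip-compressed if gzip is available
# data = lp_load('checkpoint.p')
# data['energy'] # -> -76.4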
#==============================#
#| Work Queue stuff |#
#==============================#
try:
import work_queue
except:
pass
#logger.warning("Work Queue library import fail (You can't queue up jobs using Work Queue)\n")
# Global variable corresponding to the Work Queue object
WORK_QUEUE = None
# Global variable containing a mapping from target names to Work Queue task IDs
WQIDS = defaultdict(list)
def getWorkQueue():
global WORK_QUEUE
return WORK_QUEUE
def getWQIds():
global WQIDS
return WQIDS
def createWorkQueue(wq_port, debug=True, name='geomeTRIC'):
global WORK_QUEUE
if debug:
work_queue.set_debug_flag('all')
WORK_QUEUE = work_queue.WorkQueue(
port=wq_port, catalog=True, exclusive=False, shutdown=False)
WORK_QUEUE.specify_name(name)
#WORK_QUEUE.specify_keepalive_timeout(8640000)
WORK_QUEUE.specify_keepalive_interval(8640000)
def destroyWorkQueue():
# Convenience function to destroy the Work Queue objects.
global WORK_QUEUE, WQIDS
WORK_QUEUE = None
WQIDS = defaultdict(list)
def queue_up(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of files) A list of locations of the input files.
@param[in] output_files (list of files) A list of locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
cwd = os.getcwd()
for f in input_files:
lf = os.path.join(cwd, f)
task.specify_input_file(lf, f, cache=False)
for f in output_files:
lf = os.path.join(cwd, f)
task.specify_output_file(lf, f, cache=False)
task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS)
if tag is None:
tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info("Submitting command '%s' to the Work Queue, %staskid %i\n" % (
command, "tag %s, " % tag if tag != command else "", taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def queue_up_src_dest(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue. This function is a bit fancier in that we can explicitly
specify where the input files come from, and where the output files go to.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of 2-tuples) A list of local and
remote locations of the input files.
@param[in] output_files (list of 2-tuples) A list of local and
remote locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
for f in input_files:
# print f[0], f[1]
task.specify_input_file(f[0], f[1], cache=False)
for f in output_files:
# print f[0], f[1]
task.specify_output_file(f[0], f[1], cache=False)
task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS)
if tag is None:
tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info(
"Submitting command '%s' to the Work Queue, taskid %i\n" % (command, taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def wq_wait1(wq, wait_time=10, wait_intvl=1, print_time=60, verbose=False):
""" This function waits ten seconds to see if a task in the Work Queue has finished. """
global WQIDS
if verbose:
logger.info('---\n')
if wait_intvl >= wait_time:
wait_time = wait_intvl
numwaits = 1
else:
numwaits = int(wait_time/wait_intvl)
for sec in range(numwaits):
task = wq.wait(wait_intvl)
if task:
exectime = task.cmd_execution_time/1000000
if verbose:
logger.info('A job has finished!\n')
logger.info('Job name = ' + task.tag +
', command = ' + task.command + '\n')
logger.info("status = " + str(task.status) + '\n')
logger.info("return_status = " + str(task.return_status) + '\n')
logger.info("result = " + str(task.result) + '\n')
logger.info("host = " + task.hostname + '\n')
logger.info("execution time = " + str(exectime) + '\n')
logger.info("total_bytes_transferred = " +
str(task.total_bytes_transferred) + '\n')
if task.result != 0:
oldid = task.id
oldhost = task.hostname
tgtname = "None"
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
tgtname = tnm
WQIDS[tnm].remove(task.id)
taskid = wq.submit(task)
logger.warning("Task '%s' (task %i) failed on host %s (%i seconds), resubmitted: taskid %i\n" % (
task.tag, oldid, oldhost, exectime, taskid))
WQIDS[tgtname].append(taskid)
else:
if hasattr(task, 'print_time'):
print_time = task.print_time
# Assume that we're only interested in printing jobs that last longer than a minute.
if exectime > print_time:
logger.info("Task '%s' (task %i) finished successfully on host %s (%i seconds)\n" % (
task.tag, task.id, task.hostname, exectime))
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
WQIDS[tnm].remove(task.id)
del task
# LPW 2018-09-10 Updated to use stats fields from CCTools 6.2.10
# Please upgrade CCTools version if errors are encountered during runtime.
if verbose:
logger.info("Workers: %i init, %i idle, %i busy, %i total joined, %i total removed\n"
% (wq.stats.workers_init, wq.stats.workers_idle, wq.stats.workers_busy, wq.stats.workers_joined, wq.stats.workers_removed))
logger.info("Tasks: %i running, %i waiting, %i dispatched, %i submitted, %i total complete\n"
% (wq.stats.tasks_running, wq.stats.tasks_waiting, wq.stats.tasks_dispatched, wq.stats.tasks_submitted, wq.stats.tasks_done))
logger.info("Data: %i / %i kb sent/received\n" %
(int(wq.stats.bytes_sent/1024), int(wq.stats.bytes_received/1024)))
else:
logger.info("\r%s : %i/%i workers busy; %i/%i jobs complete \r" %
(time.ctime(), wq.stats.workers_busy, wq.stats.workers_connected, wq.stats.tasks_done, wq.stats.tasks_submitted))
if time.time() - wq_wait1.t0 > 900:
wq_wait1.t0 = time.time()
logger.info('\n')
wq_wait1.t0 = time.time()
def wq_wait(wq, wait_time=10, wait_intvl=10, print_time=60, verbose=False):
""" This function waits until the work queue is completely empty. """
while not wq.empty():
wq_wait1(wq, wait_time=wait_time, wait_intvl=wait_intvl,
print_time=print_time, verbose=verbose)
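# Illustrative sketch (not exercised anywhere in this module) of the intended
# submit/wait pattern, assuming a work_queue.WorkQueue instance has already
# been created by the setup code above; the command and file names are
# placeholders.
def _example_queue_usage(wq):
    # Stage the script and its input on the worker, run it remotely, and
    # fetch the output file back when the task completes.
    queue_up(wq,
             command="python analyze.py input.dat > output.dat",
             input_files=["analyze.py", "input.dat"],
             output_files=["output.dat"],
             tag="analyze-example")
    # Block until every submitted task has finished; wq_wait1 resubmits
    # failed tasks automatically.
    wq_wait(wq, verbose=True)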
#=====================================#
#| File and process management stuff |#
#=====================================#
def click():
""" Stopwatch function for timing. """
ans = time.time() - click.t0
click.t0 = time.time()
return ans
click.t0 = time.time()
# Back up a file.
def bak(path, dest=None):
oldf = path
newf = None
if os.path.exists(path):
dnm, fnm = os.path.split(path)
if dnm == '':
dnm = '.'
base, ext = os.path.splitext(fnm)
if dest is None:
dest = dnm
if not os.path.isdir(dest):
os.makedirs(dest)
i = 1
while True:
fnm = "%s_%i%s" % (base, i, ext)
newf = os.path.join(dest, fnm)
if not os.path.exists(newf):
break
i += 1
logger.info("Backing up %s -> %s\n" % (oldf, newf))
shutil.move(oldf, newf)
return newf
# Purpose: Given a file name and/or an extension, do one of the following:
# 1) If provided a file name, check the file, crash if not exist and err==True. Return the file name.
# 2) If list is empty but extension is provided, check if one file exists that matches
# the extension. If so, return the file name.
# 3) If list is still empty and err==True, then crash with an error.
def onefile(fnm=None, ext=None, err=False):
if fnm is None and ext is None:
if err:
logger.error(
"Must provide either filename or extension to onefile()")
raise RuntimeError
else:
return None
if fnm is not None:
if os.path.exists(fnm):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (
os.path.join(os.getcwd(), os.path.basename(fnm)), os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (
os.path.abspath(fnm), os.getcwd()))
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (
os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
return os.path.basename(fnm)
        elif err or ext is None:
logger.error("File specified by %s does not exist!" % fnm)
raise RuntimeError
elif ext is not None:
warn_once(
"File specified by %s does not exist - will try to autodetect .%s extension" % (fnm, ext))
answer = None
cwd = os.getcwd()
ls = [i for i in os.listdir(cwd) if i.endswith('.%s' % ext)]
if len(ls) != 1:
if err:
logger.error("Cannot find a unique file with extension .%s in %s (%i found; %s)" % (
ext, cwd, len(ls), ' '.join(ls)))
raise RuntimeError
else:
warn_once("Cannot find a unique file with extension .%s in %s (%i found; %s)" %
(ext, cwd, len(ls), ' '.join(ls)), warnhash="Found %i .%s files" % (len(ls), ext))
else:
answer = os.path.basename(ls[0])
warn_once("Autodetected %s in %s" % (answer, cwd),
warnhash="Autodetected %s" % answer)
return answer
# Purpose: Given a file name / file list and/or an extension, do one of the following:
# 1) If provided a file list, check each file in the list
# and crash if any file does not exist. Return the list.
# 2) If provided a file name, check the file and crash if the file
# does not exist. Return a length-one list with the file name.
# 3) If list is empty but extension is provided, check for files that
# match the extension. If so, append them to the list.
# 4) If list is still empty and err==True, then crash with an error.
def listfiles(fnms=None, ext=None, err=False, dnm=None):
answer = []
cwd = os.path.abspath(os.getcwd())
if dnm is not None:
os.chdir(dnm)
if isinstance(fnms, list):
for i in fnms:
if not os.path.exists(i):
logger.error('Specified %s but it does not exist' % i)
raise RuntimeError
answer.append(i)
elif isinstance(fnms, six.string_types):
if not os.path.exists(fnms):
logger.error('Specified %s but it does not exist' % fnms)
raise RuntimeError
answer = [fnms]
elif fnms is not None:
logger.info(fnms)
logger.error(
'First argument to listfiles must be a list, a string, or None')
raise RuntimeError
if answer == [] and ext is not None:
answer = [os.path.basename(i) for i in os.listdir(
os.getcwd()) if i.endswith('.%s' % ext)]
if answer == [] and err:
logger.error('listfiles function failed to come up with a file! (fnms = %s ext = %s)' % (
str(fnms), str(ext)))
raise RuntimeError
for ifnm, fnm in enumerate(answer):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (
os.path.join(os.getcwd(), os.path.basename(fnm)), os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (
os.path.abspath(fnm), os.getcwd()))
answer[ifnm] = os.path.basename(fnm)
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (
os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
answer[ifnm] = os.path.basename(fnm)
os.chdir(cwd)
return answer
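# Hedged usage sketch for the two lookup helpers above; the '.gro'/'.itp'
# extensions are placeholders, not files this module ships with.
def _example_file_lookup():
    # Return the single .gro file in the current directory, or raise if zero
    # or several are found.
    coords = onefile(ext='gro', err=True)
    # Collect every .itp file present; an empty list is acceptable here
    # because err=False.
    includes = listfiles(ext='itp', err=False)
    return coords, includes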
def extract_tar(tarfnm, fnms, force=False):
"""
Extract a list of files from .tar archive with any compression.
The file is extracted to the base folder of the archive.
Parameters
----------
tarfnm :
Name of the archive file.
fnms : str or list
File names to be extracted.
force : bool, optional
If true, then force extraction of file even if they already exist on disk.
"""
# Get path of tar file.
fdir = os.path.abspath(os.path.dirname(tarfnm))
# If all files exist, then return - no need to extract.
if (not force) and all([os.path.exists(os.path.join(fdir, f)) for f in fnms]):
return
# If the tar file doesn't exist or isn't valid, do nothing.
if not os.path.exists(tarfnm):
return
if not tarfile.is_tarfile(tarfnm):
return
# Check type of fnms argument.
if isinstance(fnms, six.string_types):
fnms = [fnms]
# Load the tar file.
arch = tarfile.open(tarfnm, 'r')
# Extract only the files we have (to avoid an exception).
all_members = arch.getmembers()
all_names = [f.name for f in all_members]
members = [f for f in all_members if f.name in fnms]
    # Extract files to the destination, then close the archive handle.
    arch.extractall(fdir, members=members)
    arch.close()
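# Minimal sketch of extract_tar: pull two named members out of an archive
# sitting next to this script, skipping the work if they were already
# extracted. The archive and member names are placeholders.
def _example_extract():
    extract_tar('targets.tar.bz2', ['liquid.gro', 'gas.gro'], force=False)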
def GoInto(Dir):
if os.path.exists(Dir):
if os.path.isdir(Dir):
pass
else:
            logger.error(
                "Tried to create directory %s, it exists but isn't a directory\n" % Dir)
raise RuntimeError
else:
os.makedirs(Dir)
os.chdir(Dir)
def allsplit(Dir):
# Split a directory into all directories involved.
s = os.path.split(os.path.normpath(Dir))
if s[1] == '' or s[1] == '.':
return []
return allsplit(s[0]) + [s[1]]
def Leave(Dir):
if os.path.split(os.getcwd())[1] != Dir:
logger.error("Trying to leave directory %s, but we're actually in directory %s (check your code)\n" % (
Dir, os.path.split(os.getcwd())[1]))
raise RuntimeError
for i in range(len(allsplit(Dir))):
os.chdir('..')
# Dictionary containing specific error messages for specific missing files or file patterns
specific_lst = [(['mdrun', 'grompp', 'trjconv', 'g_energy', 'g_traj'], "Make sure to install GROMACS and add it to your path (or set the gmxpath option)"),
(['force.mdin', 'stage.leap'],
"This file is needed for setting up AMBER force matching targets"),
(['conf.pdb', 'mono.pdb'],
"This file is needed for setting up OpenMM condensed phase property targets"),
(['liquid.xyz', 'liquid.key', 'mono.xyz', 'mono.key'],
"This file is needed for setting up OpenMM condensed phase property targets"),
(['dynamic', 'analyze', 'minimize', 'testgrad', 'vibrate', 'optimize', 'polarize', 'superpose'],
"Make sure to install TINKER and add it to your path (or set the tinkerpath option)"),
(['runcuda.sh', 'npt.py', 'npt_tinker.py'],
"This file belongs in the ForceBalance source directory, not sure why it is missing"),
(['input.xyz'], "This file is needed for TINKER molecular property targets"),
(['.*key$', '.*xyz$'],
"I am guessing this file is probably needed by TINKER"),
(['.*gro$', '.*top$', '.*itp$', '.*mdp$', '.*ndx$'],
"I am guessing this file is probably needed by GROMACS")
]
# Build a dictionary mapping all of the keys in the above lists to their error messages
specific_dct = dict(list(itertools.chain(
*[[(j, i[1]) for j in i[0]] for i in specific_lst])))
def MissingFileInspection(fnm):
fnm = os.path.split(fnm)[1]
answer = ""
for key in specific_dct:
if answer == "":
answer += "\n"
if re.match(key, fnm):
answer += "%s\n" % specific_dct[key]
return answer
def wopen(dest, binary=False):
""" If trying to write to a symbolic link, remove it first. """
if os.path.islink(dest):
logger.warning(
"Trying to write to a symbolic link %s, removing it first\n" % dest)
os.unlink(dest)
if binary:
return open(dest, 'wb')
else:
return open(dest, 'w')
def LinkFile(src, dest, nosrcok=False):
if os.path.abspath(src) == os.path.abspath(dest):
return
if os.path.exists(src):
# Remove broken link
if os.path.islink(dest) and not os.path.exists(dest):
os.remove(dest)
os.symlink(src, dest)
elif os.path.exists(dest):
if os.path.islink(dest):
pass
else:
logger.error(
"Tried to create symbolic link %s to %s, destination exists but isn't a symbolic link\n" % (src, dest))
raise RuntimeError
else:
os.symlink(src, dest)
else:
if not nosrcok:
logger.error("Tried to create symbolic link %s to %s, but source file doesn't exist%s\n" % (
src, dest, MissingFileInspection(src)))
raise RuntimeError
def CopyFile(src, dest):
if os.path.exists(src):
if os.path.exists(dest):
if os.path.islink(dest):
logger.error(
"Tried to copy %s to %s, destination exists but it's a symbolic link\n" % (src, dest))
raise RuntimeError
else:
shutil.copy2(src, dest)
else:
logger.error("Tried to copy %s to %s, but source file doesn't exist%s\n" % (
src, dest, MissingFileInspection(src)))
raise RuntimeError
def link_dir_contents(abssrcdir, absdestdir):
for fnm in os.listdir(abssrcdir):
srcfnm = os.path.join(abssrcdir, fnm)
destfnm = os.path.join(absdestdir, fnm)
if os.path.islink(destfnm) and not os.path.exists(destfnm):
os.remove(destfnm)
if os.path.isfile(srcfnm) or (os.path.isdir(srcfnm) and fnm == 'IC'):
if not os.path.exists(destfnm):
#print "Linking %s to %s" % (srcfnm, destfnm)
os.symlink(srcfnm, destfnm)
def remove_if_exists(fnm):
""" Remove the file if it exists (doesn't return an error). """
if os.path.exists(fnm):
os.remove(fnm)
def which(fnm):
# Get the location of a file. Works only on UNIX-like file systems.
try:
return os.path.split(os.popen('which %s 2> /dev/null' % fnm).readlines()[0].strip())[0]
except:
return ''
# Thanks to cesarkawakami on #python (IRC freenode) for this code.
class LineChunker(object):
def __init__(self, callback):
self.callback = callback
self.buf = ""
def push(self, data):
# Added by LPW during Py3 compatibility; ran into some trouble decoding strings such as
# "a" with umlaut on top. I guess we can ignore these for now. For some reason,
        # Py2 never required decoding of data, I can simply add it to the string.
# self.buf += data # Old Py2 code...
self.buf += data.decode('utf-8') # errors='ignore')
self.nomnom()
def close(self):
if self.buf:
self.callback(self.buf + "\n")
def nomnom(self):
        # Splits the buffer by newline or carriage return and passes each
        # completed chunk on to the callback for processing.
while "\n" in self.buf or "\r" in self.buf:
chunk, sep, self.buf = re.split(r"(\r|\n)", self.buf, maxsplit=1)
self.callback(chunk + sep)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
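# Small sketch of the LineChunker contract: bytes pushed in are decoded,
# buffered, and handed to the callback one '\n'- or '\r'-terminated chunk at
# a time, with any trailing partial line flushed on close().
def _example_line_chunker():
    chunks = []
    with LineChunker(chunks.append) as chunker:
        chunker.push(b"first line\nsecond ")
        chunker.push(b"line\rthird line without newline")
    # chunks == ['first line\n', 'second line\r', 'third line without newline\n']
    return chunks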
def _exec(command, print_to_screen=False, outfnm=None, logfnm=None, stdin="",
print_command=True, copy_stdout=True, copy_stderr=False, persist=False,
expand_cr=False, print_error=True, rbytes=1, cwd=None, **kwargs):
"""Runs command line using subprocess, optionally returning stdout.
Options:
command (required) = Name of the command you want to execute
outfnm (optional) = Name of the output file name (overwritten if exists)
logfnm (optional) = Name of the log file name (appended if exists)
stdin (optional) = A string to be passed to stdin, as if it were typed (use newline character to mimic Enter key)
print_command = Whether to print the command.
copy_stdout = Copy the stdout stream; can set to False in strange situations
copy_stderr = Copy the stderr stream to the stdout stream; useful for GROMACS which prints out everything to stderr (argh.)
expand_cr = Whether to expand carriage returns into newlines (useful for GROMACS mdrun).
print_error = Whether to print error messages on a crash. Should be true most of the time.
persist = Continue execution even if the command gives a nonzero return code.
rbytes = Number of bytes to read from stdout and stderr streams at a time. GMX requires rbytes = 1 otherwise streams are interleaved. Higher values for speed.
"""
# Dictionary of options to be passed to the Popen object.
cmd_options = {'shell': isinstance(command, six.string_types), 'stdin': PIPE,
'stdout': PIPE, 'stderr': PIPE, 'universal_newlines': expand_cr, 'cwd': cwd}
# If the current working directory is provided, the outputs will be written to there as well.
if cwd is not None:
if outfnm is not None:
outfnm = os.path.abspath(os.path.join(cwd, outfnm))
if logfnm is not None:
logfnm = os.path.abspath(os.path.join(cwd, logfnm))
# "write to file" : Function for writing some characters to the log and/or output files.
def wtf(out):
if logfnm is not None:
with open(logfnm, 'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
if outfnm is not None:
with open(outfnm, 'wb+' if wtf.first else 'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
wtf.first = False
wtf.first = True
# Preserve backwards compatibility; sometimes None gets passed to stdin.
if stdin is None:
stdin = ""
if print_command:
logger.info("Executing process: \x1b[92m%-50s\x1b[0m%s%s%s%s\n" % (' '.join(command) if type(command) is list else command,
" In: %s" % cwd if cwd is not None else "",
" Output: %s" % outfnm if outfnm is not None else "",
" Append: %s" % logfnm if logfnm is not None else "",
(" Stdin: %s" % stdin.replace('\n', '\\n')) if stdin else ""))
wtf("Executing process: %s%s\n" % (command, (" Stdin: %s" %
stdin.replace('\n', '\\n')) if stdin else ""))
cmd_options.update(kwargs)
p = subprocess.Popen(command, **cmd_options)
# Write the stdin stream to the process.
p.stdin.write(stdin.encode('ascii'))
p.stdin.close()
#===============================================================#
#| Read the output streams from the process. This is a bit |#
#| complicated because programs like GROMACS tend to print out |#
#| stdout as well as stderr streams, and also carriage returns |#
#| along with newline characters. |#
#===============================================================#
# stdout and stderr streams of the process.
streams = [p.stdout, p.stderr]
# These are functions that take chunks of lines (read) as inputs.
def process_out(read):
if print_to_screen:
            sys.stdout.write(read)
if copy_stdout:
process_out.stdout.append(read)
wtf(read)
process_out.stdout = []
def process_err(read):
if print_to_screen:
            sys.stderr.write(read)
process_err.stderr.append(read)
if copy_stderr:
process_out.stdout.append(read)
wtf(read)
process_err.stderr = []
# This reads the streams one byte at a time, and passes it to the LineChunker
# which splits it by either newline or carriage return.
# If the stream has ended, then it is removed from the list.
with LineChunker(process_out) as out_chunker, LineChunker(process_err) as err_chunker:
while True:
to_read, _, _ = select(streams, [], [])
for fh in to_read:
if fh is p.stdout:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fh.read(rbytes)
read_nbytes += rbytes
else:
read += fh.read(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError(
"Failed to decode stdout from external process.")
if not read:
streams.remove(p.stdout)
p.stdout.close()
break
else:
try:
out_chunker.push(read)
break
except UnicodeDecodeError:
pass
elif fh is p.stderr:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fh.read(rbytes)
read_nbytes += rbytes
else:
read += fh.read(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError(
"Failed to decode stderr from external process.")
if not read:
streams.remove(p.stderr)
p.stderr.close()
break
else:
try:
err_chunker.push(read)
break
except UnicodeDecodeError:
pass
else:
raise RuntimeError
if len(streams) == 0:
break
p.wait()
process_out.stdout = ''.join(process_out.stdout)
process_err.stderr = ''.join(process_err.stderr)
_exec.returncode = p.returncode
if p.returncode != 0:
if process_err.stderr and print_error:
logger.warning("Received an error message:\n")
logger.warning("\n[====] \x1b[91mError Message\x1b[0m [====]\n")
logger.warning(process_err.stderr)
logger.warning("[====] \x1b[91mEnd o'Message\x1b[0m [====]\n")
if persist:
if print_error:
logger.info(
"%s gave a return code of %i (it may have crashed) -- carrying on\n" % (command, p.returncode))
else:
# This code (commented out) would not throw an exception, but instead exit with the returncode of the crashed program.
# sys.stderr.write("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n" % (command, p.returncode))
# sys.exit(p.returncode)
logger.error("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n\n" % (
command, p.returncode))
raise RuntimeError
# Return the output in the form of a list of lines, so we can loop over it using "for line in output".
Out = process_out.stdout.split('\n')
if Out[-1] == '':
Out = Out[:-1]
return Out
_exec.returncode = None
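# Illustrative call to _exec, assuming a Unix-like shell where 'echo' is
# available; stdout comes back as a list of lines and is also appended to
# the named log file.
def _example_exec():
    lines = _exec("echo hello world", print_command=False, logfnm="example.log")
    # lines == ['hello world']
    return lines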
def warn_press_key(warning, timeout=10):
logger.warning(warning + '\n')
if sys.stdin.isatty():
logger.warning(
"\x1b[1;91mPress Enter or wait %i seconds (I assume no responsibility for what happens after this!)\x1b[0m\n" % timeout)
try:
rlist, wlist, xlist = select([sys.stdin], [], [], timeout)
if rlist:
sys.stdin.readline()
except:
pass
def warn_once(warning, warnhash=None):
""" Prints a warning but will only do so once in a given run. """
if warnhash is None:
warnhash = warning
if warnhash in warn_once.already:
return
warn_once.already.add(warnhash)
if type(warning) is str:
logger.info(warning + '\n')
elif type(warning) is list:
for line in warning:
logger.info(line + '\n')
warn_once.already = set()
#=========================================#
#| Development stuff (not commonly used) |#
#=========================================#
def concurrent_map(func, data):
"""
Similar to the bultin function map(). But spawn a thread for each argument
and apply `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,))
for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
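# Tiny usage sketch for concurrent_map: one thread per element, so this only
# pays off for I/O-bound callables given the GIL.
def _example_concurrent_map():
    return concurrent_map(lambda x: x * x, [1, 2, 3, 4])
    # -> [1, 4, 9, 16]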
# ob utils
ELEMENT_TABLE = elements.ElementData()
def getAllCoords(mol):
natoms = mol.OBMol.NumAtoms()
tmpcoords = np.zeros((natoms, 3))
for i in range(natoms):
a = mol.OBMol.GetAtom(i+1)
tmpcoords[i, :] = [a.GetX(), a.GetY(), a.GetZ()]
return tmpcoords
#def getAtomicNum(mol,i):
# a = mol.OBMol.GetAtom(i)
# return a.GetAtomicNum()
#
#def getAtomicSymbols(mol):
# natoms = mol.OBMol.NumAtoms()
# atomic_nums = [ getAtomicNum(mol,i+1) for i in range(natoms) ]
# atomic_symbols = [ ELEMENT_TABLE.from_atomic_number(i).symbol for i in atomic_nums ]
# return atomic_symbols
#
#def make_mol_from_coords(coords,atomic_symbols):
# mol = ob.OBMol()
# for s,xyz in zip(atomic_symbols,coords):
# i = mol.NewAtom()
# a = ELEMENT_TABLE.from_symbol(s).atomic_num
# i.SetAtomicNum(a)
# i.SetVector(xyz[0],xyz[1],xyz[2])
# return pb.Molecule(mol)
def cartesian_product2(arrays):
""" Form a Cartesian product of two NumPy arrays. """
la = len(arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=np.int32)
for i, a in enumerate(np.ix_(*arrays)):
arr[..., i] = a
return arr.reshape(-1, la)
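# Quick sketch of cartesian_product2: every combination of the input values,
# one combination per row.
def _example_cartesian_product2():
    rows = cartesian_product2([np.array([0, 1]), np.array([5, 6])])
    # rows -> array([[0, 5], [0, 6], [1, 5], [1, 6]], dtype=int32)
    return rows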
|
video.py
|
import json
import cv2
import numpy as np
import random
import numbers
import os
from skimage import io
from tqdm import tqdm
import math
from collections import defaultdict
import matplotlib.pyplot as plt
import bisect
from skimage import io
import pandas as pd
from concurrent import futures
from plotbee.utils import read_json, get_fname
from multiprocessing import Process, Queue, Lock
from plotbee.frame import Frame
from plotbee.body import Body
from plotbee.body import parse_parts
from plotbee.track import Track
from plotbee.utils import save_json, rescale_image
from plotbee.tracking import hungarian_tracking, sort_tracking, non_max_supression_video, hungarian_tracking_with_prediction
from plotbee.events import track_classification
from plotbee.tag import detect_tags_on_video
from plotbee.video_wrapper import VideoCaptureWrapper
# from plotbee.tag import match_tags
from plotbee.videoplotter import extract_body
# from plotbee.pollen import process_pollen
# from plotbee.utils import divide_video, merge_videos
from tensorflow.keras.models import model_from_json, load_model
import tensorflow as tf
import multiprocessing as mp
SIZE=(375, 450)
def divide_video(video, fname, N):
frames = len(video)
batch = frames//N
fpath, ext = os.path.splitext(fname)
filenames = list()
for i in range(N):
start = i * batch
end = (i + 1) * batch
if end > frames:
end = frames
v = video[start:end]
path = fpath + "_" + str(i) + ext
v.save(path)
filenames.append(path)
return filenames
def merge_videos(video_names):
v = Video.load(video_names[0])
folder, file = os.path.split(video_names[0])
pfname, ext = os.path.splitext(file)
pfname = "_".join(pfname.split("_")[:-1]) + ext
for pname in video_names[1:]:
vi = Video.load(pname)
v.append(vi)
out_filename = os.path.join(folder, pfname)
v.save(out_filename)
return out_filename
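# Sketch of the divide/merge round trip used by process_pollen below: a Video
# is saved as N temporary JSON chunks, each chunk can be processed
# independently, and the results are stitched back together. 'tracked.json'
# is a placeholder for an existing export.
def _example_divide_merge(workers=4):
    video = Video.load("tracked.json")
    chunk_files = divide_video(video, "tracked_chunks.json", workers)
    # ... each chunk could now be handed to a separate worker process ...
    merged_file = merge_videos(chunk_files)
    return Video.load(merged_file)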
def load_json_model(json_file):
with open(json_file, 'r') as f:
data = json.load(f)
return model_from_json(data)
def load_pollen_model(model_path):
    path, ext = os.path.splitext(model_path)
    # os.path.splitext keeps the leading dot in the extension.
    if ext.lower() == ".json":
model = load_json_model(model_path)
else:
model = load_model(model_path, compile=False)
return model
def preprocess_input(image, rescale_factor=1):
image_height, image_width, _ = image.shape
dim = (image_width//rescale_factor, image_height//rescale_factor)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = cv2.resize(image, dim)
image = cv2.normalize(image,dst=image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
return image
def tfv2_pollen_classifier(video_filename, model_path, weigths_path, gpu, gpu_fraction, model_size=2048, scale=1.0):
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]=gpu
physical_devices = tf.config.list_physical_devices('GPU')
print(physical_devices, gpu)
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_virtual_device_configuration(physical_devices[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=model_size)])
folder = '/'.join(video_filename.split('/')[:-1])
model = load_pollen_model(model_path)
    if weigths_path is not None:
model.load_weights(weigths_path)
video_data = Video.load(video_filename)
# start = video_data[0].id
# print(start)
video = video_data.get_video_stream()
data = list()
Body.scale = scale
Body.width=360
Body.height=360
Body.out_width = 90
Body.out_height = 90
for i, frame in enumerate(tqdm(video_data, desc=video_filename)):
ret, im = video.read()
if not ret:
print("Something wrong with the video.")
bodies, images = Frame._extract_bodies_images(im, frame)
images = np.array(images)/255.
try:
score=model.predict_on_batch(images)
except:
# print(images.shape)
# print(frame)
continue
for body, pscore in zip(bodies, score):
body.pollen_score = float(pscore[0])
video.release()
video_data.save(video_filename)
return
def process_pollen(video, model_path, model_weights, workers=4, gpus=["1", "0"], model_size=2048, scale=1.0):
tmp_folder = "pollen_temp"
os.makedirs(tmp_folder, exist_ok=True)
pollen_path = os.path.join(tmp_folder, "pollen_temp.json")
frames = len(video)
processes = dict()
# Divide current video into N temp_files
filenames = divide_video(video, pollen_path, workers)
# Process each file with pollen classification
for i, file in enumerate(filenames):
gpu = gpus[i % len(gpus)]
processes[file] = mp.Process(target=tfv2_pollen_classifier,args= (file, model_path, model_weights, gpu, (1*len(gpus))/workers, model_size, scale))
processes[file].start()
for k in processes:
processes[k].join()
# Merge files
fname = merge_videos(filenames)
return Video.load(fname)
def find_connections(point, part, mappings):
    skeleton = list(mappings.keys())
    points = defaultdict(list)
    buffer = [(point, part)]
    while len(buffer) != 0:
        p, pt = buffer.pop()
        for limb in skeleton:
if pt in limb:
target_part = limb[0] if limb[0] != pt else limb[1]
indices = [i for i, x in enumerate(mappings[limb][pt]) if x == p]
for indx in indices:
target_point = mappings[limb][target_part][indx]
# check if not in points
if target_point not in points[target_part]:
buffer.append((target_point, target_part))
points[target_part].append(target_point)
return points
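# Worked example of the traversal above: starting from one detected point,
# follow every limb mapping that contains its part and collect the connected
# points per part. The two-limb mapping below is made up for illustration.
def _example_find_connections():
    mappings = {
        (1, 3): {1: [(10, 10)], 3: [(20, 20)]},   # e.g. head -> thorax
        (3, 5): {3: [(20, 20)], 5: [(30, 30)]},   # e.g. thorax -> abdomen
    }
    parts = find_connections((20, 20), 3, mappings)
    # parts contains {1: [(10, 10)], 5: [(30, 30)], 3: [(20, 20)]} -- the
    # starting point is folded back in when the limbs are walked in reverse.
    return parts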
def get_mappings_by_limb(maps):
detsByLimbs = defaultdict(lambda: defaultdict(list))
for m in maps:
detsByLimbs[tuple(m[5])][m[5][0]].append(tuple(m[0]))
detsByLimbs[tuple(m[5])][m[5][1]].append(tuple(m[1]))
return detsByLimbs
def point_in_frame(track_id, id_tracks, frame_id):
track_info = id_tracks[str(int(track_id))]
init_frame = track_info["init_frame"]
track_points = track_info["positions"]
track_position = frame_id - init_frame
x = track_points[track_position][0]
y = track_points[track_position][1]
return (x, y)
def find_bodyid(body_point, tracks, id_tracks, frame_id):
for track_id in tracks:
if track_id == 0:
continue
track_point = point_in_frame(track_id, id_tracks, frame_id)
if track_point == body_point:
return track_id
return -1
def create_bodies_from_mapping(tracking_limb, tracking_part, mapping, tracks, id_tracks, frame):
tracking_points = mapping[tracking_limb][tracking_part]
skeleton = list(mapping.keys())
bodies = list()
for point in tracking_points:
body_id = -1
if tracks is not None:
body_id = find_bodyid(point, tracks, id_tracks, frame.id)
body_parts = find_connections(point, tracking_part, mapping)
bodies.append(Body(body_parts, tracking_part,
tracking_limb, skeleton,
frame, body_id=body_id))
return bodies
def frames_from_detections(detections, tracks, id_tracks, tracking_limb=(1, 3), tracking_part=3, video_path=None, load_image=False):
"""
Create Video from Ivan's detections and tracks JSON objects
"""
frame_list = list()
prev_track = defaultdict(lambda: None)
track_dict = dict()
dets_size = len(detections.keys())
image = None
if load_image:
vid = cv2.VideoCapture(video_path)
for frame_id in tqdm(range(dets_size)):
str_id = str(frame_id)
if str_id not in detections:
continue
data = detections[str_id]
frametracks = None
if tracks is not None:
frametracks = tracks[frame_id]
mappings = get_mappings_by_limb(data["mapping"])
parts = data["parts"]
if load_image:
res, image = vid.read()
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frameobj = Frame([], frame_id, image, mappings, parts)
bodies = create_bodies_from_mapping(tracking_limb, tracking_part, mappings, frametracks, id_tracks, frameobj)
if tracks is not None:
for b in bodies:
if b.id not in track_dict:
track_dict[b.id] = Track(b)
b.prev = prev_track[b.id]
if prev_track[b.id] is not None:
prev_track[b.id].next = b
prev_track[b.id] = b
frameobj.update(bodies)
frame_list.append(frameobj)
if load_image:
vid.release()
return frame_list, track_dict
def image_from_video(video_path, frameid):
video = cv2.VideoCapture(video_path)
video.set(cv2.CAP_PROP_POS_FRAMES, frameid)
res, im = video.read()
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
video.release()
return im
def get_video(video_path, start=0, end=np.inf, step=1):
video = cv2.VideoCapture(video_path)
video.set(cv2.CAP_PROP_POS_FRAMES, start)
return video
def process_video(frames, video_path, start, end, img_folder, file_format, lock, pbar):
with lock:
print('Starting Consumer => {}'.format(os.getpid()))
vid = cv2.VideoCapture(video_path)
vid.set(cv2.CAP_PROP_POS_FRAMES, start)
for frame in frames:
vid.set(cv2.CAP_PROP_POS_FRAMES, frame.id)
ret, image = vid.read()
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
process_frame(image, frame, img_folder, file_format)
with lock:
pbar.update(1)
def process_frame(image, frame, img_folder, file_format):
body_info = list()
for body in frame:
if not body.valid:
continue
# info = body.info()
filename = file_format.format(body.frameid, body.id)
body_filename = os.path.join(img_folder, filename)
if body.tag is not None:
pts = np.array(body.tag["p"]).astype(np.int32)
pts = pts.reshape((-1,1,2))
image = cv2.fillPoly(image,[pts], (0,0,0))
image = cv2.polylines(image,[pts],True,(0,0,0),35)
# info["filename"] = body_filename
# body.save(body_filename, width=width, height=height, cX=cX, cY=cY)
im = extract_body(image, body, width=Body.width, height=Body.height,
cX=Body.cX, cY=Body.cY)
io.imsave(body_filename, im)
def process_frame_consumer(video_path, q, lock, pbar):
with lock:
print('Starting Consumer => {}'.format(os.getpid()))
video = cv2.VideoCapture(video_path)
while True:
params = q.get()
if params == "Done!":
break
        # Seek to the requested frame, then read it; VideoCapture.set() only
        # returns a success flag, not the image itself.
        video.set(cv2.CAP_PROP_POS_FRAMES, params[0].id)
        ret, image = video.read()
        extract_body(image, *params)
with lock:
pbar.update(1)
video.release()
with lock:
print(' Exit Consumer => {}'.format(os.getpid()))
return
def dist(a, b):
npa = np.array([a])
npb = np.array([b])
return np.sqrt(np.sum((npa - npb)**2))
def match_tags(frame, tag_list, virtual=False, th_dist=50):
for tag in tag_list:
min_dist = th_dist
closest_body = None
for body in frame:
if body.tag is not None:
continue
d = dist(body.center, tag['c'])
if d < min_dist:
min_dist = d
closest_body = body
if closest_body is not None:
closest_body.tag = tag
elif virtual:
# Add new body with the tag as thorax
x, y = tag['c']
body = Body({3: [(x,y)]}, center=3,
connections=[],angle_conn=[3,3],
frame=frame,tag=tag,body_id=-1)
frame.update([body])
class Video():
@classmethod
def from_config(cls, config, load_image=False):
try:
detection_path = config['DETECTIONS_PATH']
video_path = config['VIDEO_PATH']
except KeyError as e:
            raise Exception('You should provide {} in the config.'.format(e))
else:
tracks_json = None
id_tracks_json = None
if 'TRACK_PATH' in config:
track_path = config['TRACK_PATH']
try:
id_track_path = config['ID_TRACK_PATH']
except KeyError as e:
                    raise Exception('You should provide {} in the config.'.format(e))
tracks_json = read_json(track_path)
id_tracks_json = read_json(id_track_path)
dets = read_json(detection_path)
frames, tracks = frames_from_detections(dets, tracks_json, id_tracks_json,
video_path=video_path, load_image=load_image)
return cls(frames, tracks, config)
@classmethod
def load(cls, json_path):
data = read_json(json_path)
config = data["config"]
start = 0
stop = None
step = 1
# to support old versions
if "start" in data:
start = data["start"]
if "stop" in data:
stop = data["stop"]
if "step" in data:
step = data["step"]
frames = list()
track_dict = dict()
prev_track = defaultdict(lambda: None)
for frame in tqdm(data["frames"]):
bodies = list()
frameobj = Frame([], frame["id"], parts=frame["parts"])
for body in frame["bodies"]:
bodies.append(Body.load_body(body, frameobj))
# Compatible with older versions
# if "tag" not in body:
# body["tag"] = None
# parsed_parts = parse_parts(body["parts"])
# bodies.append(Body(parsed_parts, body["center_part"],
# tuple(body["angle_conn"]), body["connections"],
# frameobj, body["id"], body["suppressed"], body["pollen_score"], body["tag"]))
for b in bodies:
if b.id == -1:
continue
if b.id not in track_dict:
track_dict[b.id] = Track(b)
b.prev = prev_track[b.id]
if prev_track[b.id] is not None:
prev_track[b.id].next = b
track_dict[b.id].endframe = b.frameid
track_dict[b.id]._data[b.frameid] = b
prev_track[b.id] = b
frameobj.update(bodies)
frames.append(frameobj)
# for i, tr in track_dict.items():
# tr.init()
video = cls(frames, track_dict, config, start=start, stop=stop, step=step)
video.track_clasification()
return video
@classmethod
def from_detections(cls, detections, video_path=None, load_images=False):
frames, tracks = frames_from_detections(detections, None, None,
video_path=video_path, load_image=load_images)
return cls(frames, tracks, dict())
def __init__(self, frames, tracks, config, start=0, stop=None, step=1):
self._frames = frames
self._tracks = tracks
self._start = start
self._stop = stop
if not stop:
self._stop = len(self._frames)
self._step = step
for frame in self._frames:
frame.set_video(self)
self._config = config
@property
def config(self):
return self._config
@property
def frames(self):
return self._frames
@property
def tracks(self):
return self._tracks
@property
def video_path(self):
return self.config['VIDEO_PATH']
@property
def video_name(self):
folder, video_name = os.path.split(self.config['VIDEO_PATH'])
return video_name
def frame_image(self, index):
return image_from_video(self.video_path, index)
def _get_frame(self, frame_id):
return self._frames[frame_id]
def get_video_stream(self, start=None, stop=None, step=None):
if step is None:
step = self._step
if start is None:
start = self._start
if stop is None:
stop = self._stop
return VideoCaptureWrapper(self.video_path, start=start, stop=stop, step=step)
def __repr__(self):
detection_fname = None
video_fname = get_fname(self._config['VIDEO_PATH'])
if 'DETECTIONS_PATH' in self._config:
detection_fname = get_fname(self._config['DETECTIONS_PATH'])
return "Video(name={}, detections={}, len={})".format(video_fname,
detection_fname, len(self))
def __len__(self):
return len(self._frames)
def append(self, video):
if self.config == video.config:
self._frames += video._frames
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
start = index.start
stop = index.stop
step = index.step
if index.step is None:
step = self._step
if index.start is None:
start = self._start
if index.stop is None:
stop = self._stop
return cls(self._get_frame(index), self._tracks, self.config, start=start, stop=stop, step=step)
elif isinstance(index, numbers.Integral):
return self._get_frame(index)
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
def export(self, path, skeleton=True, bbox=True, tracks=True, events=False, min_parts=-1, max_workers=5, idtext=False, fontScale=2.5, fontThickness=8):
os.makedirs(path, exist_ok=True)
        # Parallel Implementation
with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_frames = list()
for frame in self.frames:
future = executor.submit(frame.save, path, skeleton, bbox,
tracks, events, min_parts,
idtext=idtext, fontScale=fontScale,
fontThickness=fontThickness)
future_frames.append(future)
done_iter = futures.as_completed(future_frames)
done_iter = tqdm(done_iter, total=len(self.frames))
            for future in done_iter:
                # Drain the iterator so the progress bar advances and any
                # exception raised inside frame.save() surfaces here.
                future.result()
return
def clear_ids(self):
for frame in self:
for body in frame:
body.set_id(-1)
body.prev = None
body.next = None
def non_max_supression(self, nms_overlap_fraction=0.6):
non_max_supression_video(self, nms_overlap_fraction)
def hungarian_tracking(self, predictive=True, **kwargs):
if predictive:
hungarian_tracking_with_prediction(self, **kwargs)
else:
hungarian_tracking(self, **kwargs)
def sort_tracking(self, bbox=200, nms_overlap_fraction=0.6):
sort_tracking(self, bbox, nms_overlap_fraction)
def track_clasification(self, **kwargs):
track_classification(self, **kwargs)
def tag_detection(self, max_workers=5):
detect_tags_on_video(self, max_workers=max_workers)
def events_counter(self):
event_counter = defaultdict(int)
for track in self.tracks.values():
if track.event is not None:
event_counter["event." + track.event] += 1
if track.track_shape is not None:
event_counter["trackshape." + track.track_shape] += 1
if track.pollen:
event_counter["pollen"] += 1
event_counter["tracks"] = len(self.tracks)
return event_counter
# def export_bodies(self, folder, width=None, height=None, cX=None, cY=None, workers=5):
# """Parallel Implementation of bodies Export"""
# file_format = "{:09d}_{:09d}.jpg"
# os.makedirs(folder, exist_ok=True)
# img_folder = os.path.join(folder, "images")
# cvs_filename = os.path.join(folder, "dataset.csv")
# json_filename = os.path.join(folder, "dataset.json")
# os.makedirs(img_folder, exist_ok=True)
# body_info = []
# with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
# future_frames = list()
# for frame in self.frames:
# future = executor.submit(process_frame, image, frame, img_folder, file_format,
# width, height, cX, cY)
# future_frames.append(future)
# done_iter = futures.as_completed(future_frames)
# done_iter = tqdm(done_iter, total=len(self.frames))
# for future in done_iter:
# body_info += future.result()
# df = pd.DataFrame(body_info)
# df.to_csv(cvs_filename, index=False)
# self.save(json_filename)
# return
def export_bodies(self, folder, width=None, height=None, cX=None, cY=None, workers=5):
"""Parallel Implementation of bodies Export"""
file_format = "{:09d}_{:09d}.jpg"
video_name, ext = os.path.splitext(self.video_name)
os.makedirs(folder, exist_ok=True)
img_folder = os.path.join(folder, video_name)
# cvs_filename = os.path.join(folder, "dataset.csv")
json_filename = os.path.join(folder, video_name + ".json")
os.makedirs(img_folder, exist_ok=True)
body_info = []
video_size = len(self)
chunksize = video_size//workers
lock = Lock()
ws = list()
pbar = list()
for w in range(workers - 1):
start = w * chunksize
end = (w + 1) * chunksize
pbar.append(tqdm(total=chunksize))
p = Process(target=process_video, args=(self[start:end], self.video_path, start, end, img_folder, file_format, lock, pbar[w]))
            p.daemon = True
ws.append(p)
p.start()
start = (workers - 1)* chunksize
end = len(self)
pbar.append(tqdm(total=end-start))
p = Process(target=process_video, args=(self[start:end], self.video_path, start, end, img_folder, file_format, lock, pbar[workers -1]))
        p.daemon = True
ws.append(p)
p.start()
for w in ws:
w.join()
self.save(json_filename)
return
def json(self):
video_json = dict()
video_json["config"] = self._config
video_json["start"] = self._start
video_json["stop"] = self._stop
video_json["step"] = self._step
video_json["frames"] = list()
for frame in tqdm(self._frames):
frame_info = dict()
frame_info["id"] = frame.id
frame_info["parts"] = frame._parts
frame_bodies = list()
for body in frame:
frame_bodies.append(body.params())
frame_info["bodies"] = frame_bodies
video_json["frames"].append(frame_info)
return video_json
def save(self, path):
video_json = self.json()
save_json(path, video_json)
def clear_tags(self):
for frame in self:
for body in frame:
body.tag = None
def load_tags(self, tags_file, virtual=False):
tags = read_json(tags_file)
self.clear_tags()
for frame in self:
sid = str(frame.id)
if sid in tags["data"]:
tagged_bees = tags["data"][sid]['tags']
match_tags(frame, tagged_bees, virtual=virtual, th_dist=50)
return
def load_video(self, video_file):
if os.path.exists(video_file):
self.config['VIDEO_PATH'] = video_file
else:
raise ValueError
def tagged(self):
tagged = list()
for frame in self:
for body in frame:
if body.tag is not None:
tagged.append(body)
return tagged
def get_frame_with_untracked_body(self):
for frame in self:
for body in frame:
if body.id == -1:
return frame
def export_tagged(self, output_folder, save_images=True):
tag_bodies = self.tagged()
_, video_name = os.path.split(self.video_path)
video_name, ext = os.path.splitext(video_name)
out_path = os.path.join(output_folder, video_name)
os.makedirs(out_path, exist_ok=True)
json_path = os.path.join(output_folder, "{}.json".format(video_name))
tagged_json= list()
for body in tqdm(tag_bodies):
tagged_json.append(body.params())
if save_images:
fname = "TID{:05}_F{:08}.jpg".format(body.tag_id, body.frameid)
fpath = os.path.join(out_path, fname)
body.save(fpath)
# print("body saved.")
save_json(json_path, tagged_json)
return
def export_pollen(self, output_folder, limit=None, sorted_scores=False):
bodies = [body for frame in self for body in frame]
bodies = sorted(bodies, key=(lambda b: b.pollen_score))
def valid(body):
x, y = body.center
if (x > 500 and x < 2100) and (y > 500 and y < 800):
return True
return False
bodies = [body for body in bodies if valid(body)]
if limit and sorted_scores:
bodies = bodies[:limit//2] + bodies[-limit//2:]
elif limit:
pollen_scores = [body.pollen_score for body in bodies]
threshold_index = bisect.bisect(pollen_scores, 0.5)
no_pollen_bodies = bodies[:threshold_index]
pollen_bodies = bodies[threshold_index:]
random.shuffle(no_pollen_bodies)
random.shuffle(pollen_bodies)
bodies = no_pollen_bodies[:limit//2] + pollen_bodies[:limit//2]
_, video_name = os.path.split(self.video_path)
video_name, ext = os.path.splitext(video_name)
print(video_name)
out_path = os.path.join(output_folder, video_name)
os.makedirs(out_path, exist_ok=True)
pollen_path = os.path.join(out_path, "P")
os.makedirs(pollen_path, exist_ok=True)
nopollen_path = os.path.join(out_path, "NP")
os.makedirs(nopollen_path, exist_ok=True)
pollen_csv = os.path.join(out_path, "pollen.csv")
fnames = list()
pollen_scores = list()
xs = list()
ys = list()
for body in tqdm(bodies):
x, y = body.center
pollen_score = body.pollen_score
fname = "{:09}_X{:04}_Y{:04}.jpg".format(body.frameid, x, y)
if pollen_score < 0.5:
fpath = os.path.join(nopollen_path, fname)
else:
fpath = os.path.join(pollen_path, fname)
fnames.append(fpath)
pollen_scores.append(pollen_score)
xs.append(x)
ys.append(y)
body.save(fpath)
df_dict = {
"filename":fnames,
"pollen": pollen_scores,
"x":xs,
"y":ys
}
df = pd.DataFrame(df_dict)
df.to_csv(pollen_csv, index=False)
return
def process_pollen(self, model_path, weights=None, workers=4, gpus=["1", "0"], model_size=2048, scale=1.0):
pollen_video = process_pollen(self, model_path, weights, workers=workers, gpus=gpus, model_size=model_size, scale=scale)
self._frames = pollen_video._frames
self._tracks = pollen_video._tracks
for frame in self._frames:
frame.set_video(self)
self._config = pollen_video._config
return
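# End-to-end sketch of a typical workflow with this class; the file paths and
# parameter values are placeholders and the steps can be reordered or skipped
# depending on what the loaded JSON already contains.
def _example_video_pipeline():
    video = Video.load("tracked.json")          # previously exported JSON
    video.load_video("videos/C02_170622.mp4")   # point it at the raw footage
    video.non_max_supression(nms_overlap_fraction=0.6)
    video.hungarian_tracking(predictive=True)
    video.track_clasification()
    print(video.events_counter())
    video.save("tracked_with_events.json")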
|
log_board.py
|
import time
import os
import re
from pathlib import Path
import threading
import logging
log = logging.getLogger(__name__)
class LogHSMercs:
def __init__(self, logpath):
"""generator function that yields new lines in filelog to
follow cards in hand and on the battlefield
"""
self.logpath = logpath
Path(logpath).touch(exist_ok=True)
self.cardsInHand = []
self.myBoard = {}
self.mercsId = {}
def follow(self):
# go to the end of the file
self.logfile.seek(0, os.SEEK_END)
regexBoard = (
".+? tag=ZONE_POSITION "
".+?entityName=(.+?) "
"id=(.+?) "
".+?zonePos=(.) "
"cardId=.+? "
"player=1\] .+? "
"dstPos=(.)"
)
# start infinite loop to read log file
while self.__running:
# read last line of file
line = self.logfile.readline()
# sleep if file hasn't been updated
if not line:
time.sleep(0.1)
continue
if "ZoneChangeList.ProcessChanges() - processing" in line and re.search(
regexBoard, line
):
(mercenary, mercId, srcpos, dstpos) = re.findall(regexBoard, line)[0]
self.mercsId[mercId] = mercenary
                # srcpos = current position on the board;
                # 0 if the card has no previous position
if (
srcpos != "0"
and srcpos in self.myBoard
and self.myBoard[srcpos] == mercId
):
self.myBoard.pop(srcpos)
# dstpos = 0 if the card is going to GRAVEYARD
if dstpos != "0":
self.myBoard[dstpos] = mercId
def start(self):
log.debug("Reading logfile: %s", self.logpath)
self.logfile = open(self.logpath, "r")
self.__running = True
t1 = threading.Thread(target=self.follow)
self.thread = t1
t1.start()
def stop(self):
# self.thread.stop()
log.debug("Closing logfile: %s", self.logpath)
self.__running = False
self.cleanHand()
self.cleanBoard()
self.logfile.close()
def cleanHand(self):
self.cardsInHand = []
def getHand(self):
return self.cardsInHand
def cleanBoard(self):
self.myBoard = {}
self.mercsId = {}
def getBoard(self):
board = {}
for key in self.myBoard.keys():
board[key] = self.mercsId[self.myBoard[key]]
return board
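# Minimal usage sketch: tail the Mercenaries log in a background thread for a
# few seconds, then read the reconstructed board. The log path is a
# placeholder.
def _example_log_board():
    watcher = LogHSMercs("Zone.log")
    watcher.start()
    time.sleep(5)                # let the reader pick up some zone changes
    board = watcher.getBoard()   # {board position: mercenary name}
    watcher.stop()
    return board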
|
PyShell.py
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.GetOption('main','Theme','name')
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
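        # Block in a nested Tk mainloop until the user submits a line (enter_callback
        # calls top.quit() while reading); everything typed after "iomark" is returned.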
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
            # Let's assume the printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if type(s) is not str:
if not isinstance(s, str):
raise TypeError('must be str, not ' + type(s).__name__)
# See issue #19481
s = str.__str__(s)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
eol = line.find('\n', 0, size)
if eol >= 0:
size = eol + 1
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if macosxSupport.isAquaTk():
# There are some screwed up <2> class bindings for text
# widgets defined in Tk which we need to do away with.
# See issue #24801.
root.unbind_class('Text', '<B2>')
root.unbind_class('Text', '<B2-Motion>')
root.unbind_class('Text', '<<PasteSelection>>')
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
server.py
|
import socket
import tools.helper as helper
import threading
import time
hp = helper.Helper(helper._FORMAT)
class Server:
def __init__(self,SERVER:str,PORT:int):
self._PORT = PORT
self._SERVER = SERVER
self._ADDR = (SERVER,PORT)
self._SSOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._RUN = False
self._ACTIVES = {}
self._Threads = []
self._shutReq = False
    def __send(self, cmd: str, ct: int):
        print("[ ! ] Send response triggered!")
        conn = 1
        # clients are numbered in dict insertion order, matching __listClients()
        for sock in self._ACTIVES:
            if ct == conn:
                print(f"[SOCKET_NAME] {conn}")
                sock.send(hp.utf8len(cmd))
                sock.send(cmd.encode(helper._FORMAT))
                print("[SENT] Message has been sent")
                break
            conn += 1
    def __sendAll(self, cmd: str):
        for online in self._ACTIVES:
            online.send(hp.utf8len(cmd))
            online.send(cmd.encode(helper._FORMAT))
def __listClients(self):
ct = 1
print("-- ACTIVE CLIENTS --\n")
for cl in self._ACTIVES:
print(f"[{ct}] {self._ACTIVES[cl][1]}")
ct += 1
print("\n-------------------- ")
return len(self._ACTIVES)
    def __close_server(self):
        while True:
            if self._shutReq:
                self.__sendAll(helper._DISC)
                print("[ ! ] All clients warned.")
                time.sleep(2)
                for cli in list(self._ACTIVES):
                    cli.close()
                self._ACTIVES.clear()
                print("\n[ALL KILLED]")
                self._shutReq = False
                #break
            time.sleep(0.5)  # avoid busy-waiting while idle
    def __connexMan(self):
        while self._RUN:
            try:
                conn, addr = self._SSOCK.accept()
            except socket.error as err:
                print(f"[ERROR-1] {err}")
                print("[CLOSING]")
                break  # accept failed (socket closed); stop accepting connections
            self._ACTIVES[conn] = addr
            print(f"\n[NEW CONNECTION] {addr[1]} connected.")
            clientHandler = threading.Thread(target=self.__clientBridge__, args=(conn, addr), daemon=True)
            clientHandler.start()
def __clientBridge__(self,conn:socket.socket,addr):
while not self._shutReq:
try:
msg_len = conn.recv(helper._HEADER).decode(helper._FORMAT)
if msg_len:
                    msg = conn.recv(int(msg_len)).decode(helper._FORMAT)
if msg == helper._DISC:
print(f"\n[{addr[1]}] DISCONNECTED")
conn.close()
del self._ACTIVES[conn]
break
else:
print(f"\n[{addr[1]}] {msg}")
except socket.error as err:
print(f"[ERROR-2] {err}")
break
def __sendResponse(self):
while self._RUN:
try:
cmd = input("cmd: > ")
if cmd:
if cmd == helper._DISC:
print("[CLOSING] server will be closed ...")
self._shutReq = True
time.sleep(2)
elif cmd.split()[0] == "!c":
self.__listClients()
elif cmd.split()[0] == "!a":
cmd = input("send: ")
self.__sendAll(cmd)
elif cmd.split()[0] == "sel":
if self.__listClients():
ct = int(input(": > "))
cmd = input("send: ")
self.__send(cmd,ct)
else:
print("\n[ ! ] No Online Client Exist")
except socket.error as err:
print(f"[ERROR-3] {err}")
self._shutReq = True
break
def run(self):
self._SSOCK.bind(self._ADDR)
print("[STARTING] server is starting...")
self._SSOCK.listen()
print(f"[LISTENING] Server is listening on {self._SERVER}")
self._RUN = True
sendTh = threading.Thread(target=self.__sendResponse,daemon=False)
sendTh.start()
print("[READY] server is able to commands")
connexTh = threading.Thread(target=self.__connexMan,daemon=True)
connexTh.start()
threading.Thread(target=self.__close_server,daemon=False).start()
if __name__ == "__main__":
s = Server(socket.gethostbyname(socket.gethostname()),5050)
s.run()
#input("Done")
|
stats.py
|
""" XVM (c) www.modxvm.com 2013-2017 """
#############################
# Command
def getBattleStat(args, respondFunc):
_stat.enqueue({
'func': _stat.getBattleStat,
'cmd': XVM_COMMAND.AS_STAT_BATTLE_DATA,
'respondFunc': respondFunc,
'args': args})
_stat.processQueue()
def getBattleResultsStat(args):
_stat.enqueue({
'func': _stat.getBattleResultsStat,
'cmd': XVM_COMMAND.AS_STAT_BATTLE_RESULTS_DATA,
'args': args})
_stat.processQueue()
def getUserData(args):
_stat.enqueue({
'func': _stat.getUserData,
'cmd': XVM_COMMAND.AS_STAT_USER_DATA,
'args': args})
_stat.processQueue()
def getClanIcon(vehicleID):
return _stat.getClanIcon(vehicleID)
#############################
# Private
import os
from pprint import pprint
import datetime
import traceback
import time
from random import randint
import threading
import uuid
import imghdr
import BigWorld
from helpers import dependency
from skeletons.gui.battle_session import IBattleSessionProvider
from gui.app_loader import g_appLoader
from items.vehicles import VEHICLE_CLASS_TAGS
from xfw import *
import xfw.utils as xfwutils
import config
from consts import *
import filecache
from logger import *
import topclans
import utils
import vehinfo
import vehinfo_xtdb
import vehinfo_xte
import xvm_scale
import xvmapi
#############################
class _Stat(object):
def __init__(self):
player = BigWorld.player()
        self.queue = []  # HINT: since WoT 0.9.0, using Queue() leads to an Access Violation after the client closes
self.lock = threading.RLock()
self.thread = None
self.req = None
self.resp = None
self.arenaId = None
self.players = {}
self.cacheBattle = {}
self.cacheUser = {}
self._loadingClanIconsCount = 0
def enqueue(self, req):
with self.lock:
self.queue.append(req)
def dequeue(self):
with self.lock:
return self.queue.pop(0) if self.queue else None
def getClanIcon(self, vehicleID):
# Load order: id -> nick -> srv -> clan -> default clan -> default nick
pl = self.players.get(vehicleID, None)
if not pl:
return None
# Return cached path
if hasattr(pl, 'clanicon'):
return pl.clanicon
def paths_gen():
# Search icons
prefix = 'res_mods/mods/shared_resources/xvm/res/{}'.format(
xfwutils.fix_path_slashes(config.get('battle/clanIconsFolder')))
yield '{}ID/{}.png'.format(prefix, pl.accountDBID)
yield '{}{}/nick/{}.png'.format(prefix, GAME_REGION, pl.name)
if hasattr(pl, 'x_emblem'):
yield pl.x_emblem
if pl.clan:
yield '{}{}/clan/{}.png'.format(prefix, GAME_REGION, pl.clan)
yield '{}{}/clan/default.png'.format(prefix, GAME_REGION)
yield '{}{}/nick/default.png'.format(prefix, GAME_REGION)
for fn in paths_gen():
if os.path.isfile(fn):
pl.clanicon = utils.fixImgTag('xvm://' + fn[len('res_mods/mods/shared_resources/xvm/'):])
return pl.clanicon
pl.clanicon = None
return pl.clanicon
def processQueue(self):
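        # Pop one request and run it on a worker thread; completion is detected by
        # polling _checkResult() via BigWorld.callback so the game thread never blocks.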
#debug('processQueue')
with self.lock:
if self.thread is not None:
#debug('already working')
return
#debug('dequeue')
self.req = self.dequeue()
if self.req is None:
#debug('no req')
return
self.resp = None
self.thread = threading.Thread(target=self.req['func'])
self.thread.daemon = False
self.thread.start()
# self.req['func']()
#debug('start')
# self._checkResult()
BigWorld.callback(0, self._checkResult)
def _checkResult(self):
with self.lock:
debug("checkResult: " + ("no" if self.resp is None else "yes"))
if self.thread is not None:
self.thread.join(0.01) # 10 ms
if self.resp is None:
BigWorld.callback(0.1, self._checkResult)
return
try:
self._respond()
except Exception:
err(traceback.format_exc())
finally:
#debug('done')
if self.thread:
#debug('join')
self.thread.join()
#debug('thread deleted')
self.thread = None
# self.processQueue()
BigWorld.callback(0, self.processQueue)
def _respond(self):
debug("respond: " + self.req['cmd'])
self.resp = unicode_to_ascii(self.resp)
func = self.req.get('respondFunc', as_xfw_cmd)
func(self.req['cmd'], self.resp)
# Threaded
def getBattleStat(self, tries=0):
try:
player = BigWorld.player()
if player.__class__.__name__ == 'PlayerAvatar' and player.arena is not None:
self._get_battle()
return # required to prevent deadlock
else:
debug('WARNING: arena not created, but getBattleStat() called')
# # Long initialization with high ping
# if tries < 5:
# time.sleep(1)
# self.getBattleStat(tries+1)
except Exception:
err(traceback.format_exc())
with self.lock:
if not self.resp:
self.resp = {}
def getBattleResultsStat(self):
try:
player = BigWorld.player()
if player.__class__.__name__ == 'PlayerAccount':
self._get_battleresults()
return # required to prevent deadlock
except Exception:
err(traceback.format_exc())
with self.lock:
if not self.resp:
self.resp = {}
def getUserData(self):
try:
self._get_user()
return # required to prevent deadlock
except Exception:
err(traceback.format_exc())
with self.lock:
if not self.resp:
self.resp = {}
def _get_battle(self):
player = BigWorld.player()
if player.arenaUniqueID is None or self.arenaId != player.arenaUniqueID:
self.arenaId = player.arenaUniqueID
self.players = {}
# update players
self._loadingClanIconsCount = 0
vehicles = BigWorld.player().arena.vehicles
for (vehicleID, vData) in vehicles.iteritems():
if vehicleID not in self.players:
pl = _Player(vehicleID, vData)
self._load_clanIcon(pl)
# cleanup same player with different vehicleID (bug?)
self.players = {k:v for k,v in self.players.iteritems() if v.accountDBID != pl.accountDBID}
self.players[vehicleID] = pl
self.players[vehicleID].update(vData)
# sleepCounter = 0
while self._loadingClanIconsCount > 0:
time.sleep(0.01)
# # FIX: temporary workaround
# sleepCounter += 1
# if sleepCounter > 1000: # 10 sec
# log('WARNING: icons loading too long')
# break;
playerVehicleID = player.playerVehicleID if hasattr(player, 'playerVehicleID') else 0
self._load_stat(playerVehicleID)
players = {}
for (vehicleID, pl) in self.players.iteritems():
cacheKey = "%d=%d" % (pl.accountDBID, pl.vehCD)
if cacheKey not in self.cacheBattle:
cacheKey2 = "%d" % pl.accountDBID
if cacheKey2 not in self.cacheBattle:
self.cacheBattle[cacheKey] = self._get_battle_stub(pl)
stat = self.cacheBattle[cacheKey]
self._fix(stat)
players[pl.name] = stat
# pprint(players)
with self.lock:
self.resp = {'players': players}
def _get_battleresults(self):
(arenaUniqueId,) = self.req['args']
player = BigWorld.player()
player.battleResultsCache.get(int(arenaUniqueId), self._battleResultsCallback)
def _battleResultsCallback(self, responseCode, value=None, revision=0):
try:
if responseCode < 0:
with self.lock:
self.resp = {}
return
# pprint(value)
self.players = {}
# update players
for (vehicleID, vData) in value['vehicles'].iteritems():
accountDBID = vData[0]['accountDBID']
plData = value['players'][accountDBID]
vData = {
'accountDBID': accountDBID,
'name': plData['name'],
'clanAbbrev': plData['clanAbbrev'],
'typeCompDescr': vData[0]['typeCompDescr'],
'team': vData[0]['team']}
self.players[vehicleID] = _Player(vehicleID, vData)
self._load_stat(0)
players = {}
for (vehicleID, pl) in self.players.iteritems():
cacheKey = "%d=%d" % (pl.accountDBID, pl.vehCD)
if cacheKey not in self.cacheBattle:
cacheKey2 = "%d" % pl.accountDBID
if cacheKey2 not in self.cacheBattle:
self.cacheBattle[cacheKey] = self._get_battle_stub(pl)
stat = self.cacheBattle[cacheKey]
self._fix(stat)
players[pl.name] = stat
# pprint(players)
with self.lock:
self.resp = {'arenaUniqueId': str(value['arenaUniqueID']), 'players': players}
except Exception:
err(traceback.format_exc())
print('=================================')
print('_battleResultsCallback() exception: ' + traceback.format_exc())
pprint(value)
print('=================================')
with self.lock:
self.resp = {}
def _get_user(self):
(value,) = self.req['args']
orig_value = value
region = GAME_REGION
if region == "CT":
suf = value[-3:]
if suf in ('_RU', '_EU', '_NA', '_US', '_SG'):
region = value[-2:]
value = value[:-3]
if region == 'US':
region = 'NA'
else:
region = "RU"
cacheKey = "%s/%s" % (region, value)
data = None
if cacheKey not in self.cacheUser:
try:
token = config.token.token
if token is None:
err('No valid token for XVM network services (key=%s)' % cacheKey)
else:
data = xvmapi.getStatsByNick(region, value)
if data is not None:
self._fix_user(data, orig_value)
if 'nm' in data and '_id' in data:
self.cacheUser[region + "/" + data['nm']] = data
else:
self.cacheUser[cacheKey] = {}
except Exception:
err(traceback.format_exc())
with self.lock:
self.resp = self.cacheUser.get(cacheKey, {})
def _get_battle_stub(self, pl):
s = {
'vehicleID': pl.vehicleID,
'_id': pl.accountDBID,
'nm': pl.name,
'v': {'id': pl.vehCD},
}
return self._fix(s)
def _load_stat(self, playerVehicleID):
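        # Build one "accountDBID=vehCD" request entry per player not yet cached;
        # a trailing "=1" marks the requesting player's own vehicle (live battles only).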
requestList = []
replay = isReplay()
all_cached = True
for (vehicleID, pl) in self.players.iteritems():
cacheKey = "%d=%d" % (pl.accountDBID, pl.vehCD)
if cacheKey not in self.cacheBattle:
all_cached = False
if pl.vehCD != 65281:
requestList.append("%d=%d%s" % (
pl.accountDBID,
pl.vehCD,
'=1' if not replay and pl.vehicleID == playerVehicleID else ''))
if all_cached or not requestList:
return
try:
accountDBID = utils.getAccountDBID()
if config.networkServicesSettings.statBattle:
data = self._load_data_online(accountDBID, ','.join(requestList))
else:
data = self._load_data_offline(accountDBID)
if data is None:
return
for stat in data['players']:
self._fix(stat)
#log(stat)
if 'nm' not in stat or not stat['nm']:
continue
if 'b' not in stat or stat['b'] <= 0:
continue
cacheKey = "%d=%d" % (stat['_id'], stat.get('v', {}).get('id', 0))
self.cacheBattle[cacheKey] = stat
except Exception:
err(traceback.format_exc())
def _load_data_online(self, accountDBID, request):
token = config.token.token
if token is None:
err('No valid token for XVM network services (id=%s)' % accountDBID)
return None
if isReplay():
data = xvmapi.getStatsReplay(request)
else:
data = xvmapi.getStats(request)
if data is None:
err('Stat request data is None')
return None
if 'players' not in data:
err('Malformed stat result: {}'.format(data))
return None
return data
def _load_data_offline(self, accountDBID):
players = []
for (vehicleID, pl) in self.players.iteritems():
players.append(self._get_battle_stub(pl))
return {'players': players}
def _fix(self, stat, orig_name=None):
self._fix_common(stat)
player = BigWorld.player()
team = player.team if hasattr(player, 'team') else 0
if self.players is not None:
for (vehicleID, pl) in self.players.iteritems():
if pl.accountDBID == stat['_id']:
stat['vehicleID'] = pl.vehicleID
if pl.clan:
stat['clan'] = pl.clan
cid = pl.clanInfo.get('cid', None) if pl.clanInfo else None
scid = stat.get('cid', None)
if (scid is None or scid == cid) and stat.get('rank') is not None and stat.get('emblem') is not None:
pl.clanInfo = {'cid': scid, 'rank': stat['rank'], 'emblem': stat['emblem']}
self._load_clanIcon(pl)
else:
stat['cid'] = cid
stat['rank'] = pl.clanInfo.get('rank', None) if pl.clanInfo else None
stat['emblem'] = pl.clanInfo.get('emblem', None) if pl.clanInfo else None
stat['name'] = pl.name
stat['team'] = TEAM.ALLY if team == pl.team else TEAM.ENEMY
stat['squadnum'] = pl.squadnum
stat['badgeId'] = pl.badgeId
if hasattr(pl, 'alive'):
stat['alive'] = pl.alive
if hasattr(pl, 'ready'):
stat['ready'] = pl.ready
if 'id' not in stat['v']:
stat['v']['id'] = pl.vehCD
break
self._fix_common2(stat, orig_name, False)
self._addContactData(stat)
return stat
def _fix_user(self, stat, orig_name=None):
self._fix_common(stat)
self._fix_common2(stat, orig_name, True)
self._addContactData(stat)
return stat
def _fix_common(self, stat):
if 'v' not in stat:
stat['v'] = {}
if stat.get('e', 0) <= 0:
stat['e'] = None
if stat.get('wn6', 0) <= 0:
stat['wn6'] = None
if stat.get('wn8', 0) <= 0:
stat['wn8'] = None
if stat.get('wgr', 0) <= 0:
stat['wgr'] = None
def _fix_common2(self, stat, orig_name, multiVehicles):
if orig_name is not None:
stat['name'] = orig_name
if 'b' in stat and 'w' in stat and stat['b'] > 0:
self._calculateGWR(stat)
self._calculateXvmScale(stat)
if multiVehicles:
for vehicleID, vData in stat['v'].iteritems():
vData['id'] = int(vehicleID)
self._calculateVehicleValues(stat, vData)
self._calculateXTDB(vData)
self._calculateXTE(vData)
else:
vData = stat['v']
if 'id' in vData:
self._calculateVehicleValues(stat, vData)
self._calculateXTDB(vData)
self._calculateXTE(vData)
# Global Win Rate (GWR)
def _calculateGWR(self, stat):
stat['winrate'] = float(stat['w']) / float(stat['b']) * 100.0
# XVM Scale
def _calculateXvmScale(self, stat):
if 'e' in stat and stat['e'] > 0:
stat['xeff'] = xvm_scale.XEFF(stat['e'])
if 'wn6' in stat and stat['wn6'] > 0:
stat['xwn6'] = xvm_scale.XWN6(stat['wn6'])
if 'wn8' in stat and stat['wn8'] > 0:
stat['xwn8'] = xvm_scale.XWN8(stat['wn8'])
if 'wgr' in stat and stat['wgr'] > 0:
stat['xwgr'] = xvm_scale.XWGR(stat['wgr'])
# calculate Vehicle values
def _calculateVehicleValues(self, stat, v):
vehicleID = v['id']
vData = vehinfo.getVehicleInfoData(vehicleID)
if vData is None:
return
#log(vData['key'])
#log(vData)
# tank rating
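        # Blend the account win rate (Or) toward the per-vehicle win rate (Tr):
        # the weight grows with the vehicle's battle count (Tb = battles/100) and
        # tier factor (Tl = level/4, capped at 1); above 100 battles Tr is used as-is.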
if 'b' not in v or 'w' not in v or v['b'] <= 0:
v['winrate'] = stat['winrate']
else:
Tr = float(v['w']) / float(v['b']) * 100.0
if v['b'] > 100:
v['winrate'] = Tr
else:
Or = float(stat['winrate'])
Tb = float(v['b']) / 100.0
Tl = float(min(vData['level'], 4)) / 4.0
v['winrate'] = Or - (Or - Tr) * Tb * Tl
if 'b' not in v or v['b'] <= 0:
return
vb = float(v['b'])
if 'dmg' in v and v['dmg'] > 0:
v['db'] = float(v['dmg']) / vb
v['dv'] = float(v['dmg']) / vb / vData['hpTop']
if 'frg' in v and v['frg'] > 0:
v['fb'] = float(v['frg']) / vb
if 'spo' in v and v['spo'] > 0:
v['sb'] = float(v['spo']) / vb
# calculate xTDB
def _calculateXTDB(self, v):
if 'db' not in v or v['db'] < 0:
return
v['xtdb'] = vehinfo_xtdb.calculateXTDB(v['id'], float(v['db']))
#log(v['xtdb'])
# calculate xTE
def _calculateXTE(self, v):
if 'db' not in v or v['db'] < 0:
return
if 'fb' not in v or v['fb'] < 0:
return
v['xte'] = vehinfo_xte.calculateXTE(v['id'], float(v['db']), float(v['fb']))
#log(str(v['id']) + " xte=" + str(v['xte']))
def _addContactData(self, stat):
# try to add changed nick and comment
try:
import xvm_contacts.python.contacts as contacts
stat['xvm_contact_data'] = contacts.getXvmContactData(stat['_id'])
except Exception:
#err(traceback.format_exc())
pass
def _load_clanIcon(self, pl):
try:
if hasattr(pl, 'x_emblem'):
BigWorld.callback(0,
lambda: as_xfw_cmd(XVM_COMMAND.AS_ON_CLAN_ICON_LOADED, pl.vehicleID, pl.name))
elif hasattr(pl, 'x_emblem_loading'):
return
elif pl.clanInfo:
rank = pl.clanInfo.get('rank', -1)
url = pl.clanInfo.get('emblem', None)
# url = 'http://stat.modxvm.com:81'
if url and 0 <= rank <= config.networkServicesSettings.topClansCount:
url = url.replace('{size}', '32x32')
tID = 'icons/clan/{0}'.format(pl.clanInfo['cid'])
self._loadingClanIconsCount += 1
pl.x_emblem_loading = True
debug('clan={0} rank={1} url={2}'.format(pl.clan, rank, url))
filecache.get_url(url, (lambda url, bytes: self._load_clanIcons_callback(pl, tID, bytes)))
except Exception:
err(traceback.format_exc())
def _load_clanIcons_callback(self, pl, tID, bytes):
try:
if bytes and imghdr.what(None, bytes) is not None:
# imgid = str(uuid.uuid4())
# BigWorld.wg_addTempScaleformTexture(imgid, bytes) # removed after first use?
imgid = 'icons/{0}.png'.format(pl.clan)
filecache.save(imgid, bytes)
del pl.x_emblem_loading
pl.x_emblem = 'res_mods/mods/shared_resources/xvm/cache/%s' % imgid
if hasattr(pl, 'clanicon'):
del pl.clanicon
as_xfw_cmd(XVM_COMMAND.AS_ON_CLAN_ICON_LOADED, pl.vehicleID, pl.name)
#debug('{} {} {} {}'.format(
# pl.clan,
# tID,
# len(bytes) if bytes else '(none)',
# imghdr.what(None, bytes) if bytes else ''))
except Exception:
err(traceback.format_exc())
finally:
self._loadingClanIconsCount -= 1
class _Player(object):
__slots__ = ('vehicleID', 'accountDBID', 'name', 'clan', 'clanInfo', 'badgeId', 'team',
'squadnum', 'vehCD', 'vLevel', 'maxHealth', 'vIcon', 'vn', 'vType', 'alive',
'ready', 'x_emblem', 'x_emblem_loading', 'clanicon')
sessionProvider = dependency.descriptor(IBattleSessionProvider)
def __init__(self, vehicleID, vData):
self.vehicleID = vehicleID
self.accountDBID = vData['accountDBID']
self.name = vData['name']
self.clan = vData['clanAbbrev']
self.badgeId = ''
ranked = vData.get('ranked', None)
if ranked and ranked[1]:
self.badgeId = str(ranked[1][0])
self.clanInfo = topclans.getClanInfo(self.clan)
self.vehCD = None
if 'typeCompDescr' in vData:
self.vehCD = vData['typeCompDescr']
elif 'vehicleType' in vData:
vtype = vData['vehicleType']
if hasattr(vtype, 'type'):
self.vehCD = vData['vehicleType'].type.compactDescr
if self.vehCD is None:
self.vehCD = 0
self.team = vData['team']
self.squadnum = 0
arenaDP = self.sessionProvider.getArenaDP()
if arenaDP is not None:
vInfo = arenaDP.getVehicleInfo(vID=vehicleID)
self.squadnum = vInfo.squadIndex
# if self.squadnum > 0:
# log("team=%d, squad=%d %s" % (self.team, self.squadnum, self.name))
def update(self, vData):
vtype = vData['vehicleType']
if hasattr(vtype, 'type'):
self.vehCD = vtype.type.compactDescr
self.vLevel = vtype.type.level
self.vIcon = vtype.type.name.replace(':', '-')
# self.vn = vtype.type.name
# self.vn = self.vn[self.vn.find(':')+1:].upper()
self.vType = set(VEHICLE_CLASS_TAGS.intersection(vtype.type.tags)).pop()
if hasattr(vtype, 'maxHealth'):
self.maxHealth = vtype.maxHealth
self.team = vData['team']
self.alive = vData['isAlive']
self.ready = vData['isAvatarReady']
_stat = _Stat()
|
client.py
|
import re
import shlex
import threading
import time
from typing import Dict, List, Optional, get_type_hints
import urllib3
from docopt import docopt
from prompt_toolkit import HTML, PromptSession
from prompt_toolkit.completion import Completer
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.patch_stdout import patch_stdout
from empire.arguments import args
from empire.client.src.bindings import bindings
from empire.client.src.EmpireCliConfig import empire_config
from empire.client.src.EmpireCliState import state
from empire.client.src.menus import Menu
from empire.client.src.menus.AdminMenu import admin_menu
from empire.client.src.menus.AgentMenu import agent_menu
from empire.client.src.menus.ChatMenu import chat_menu
from empire.client.src.menus.CredentialMenu import credential_menu
from empire.client.src.menus.EditListenerMenu import edit_listener_menu
from empire.client.src.menus.InteractMenu import interact_menu
from empire.client.src.menus.ListenerMenu import listener_menu
from empire.client.src.menus.MainMenu import main_menu
from empire.client.src.menus.PluginMenu import plugin_menu
from empire.client.src.menus.ProxyMenu import proxy_menu
from empire.client.src.menus.ShellMenu import shell_menu
from empire.client.src.menus.SponsorsMenu import sponsors_menu
from empire.client.src.menus.UseCredentialMenu import use_credential_menu
from empire.client.src.menus.UseListenerMenu import use_listener_menu
from empire.client.src.menus.UseModuleMenu import use_module_menu
from empire.client.src.menus.UsePluginMenu import use_plugin_menu
from empire.client.src.menus.UseStagerMenu import use_stager_menu
from empire.client.src.MenuState import menu_state
from empire.client.src.ShortcutHandler import shortcut_handler
from empire.client.src.utils import print_util
class MyCustomCompleter(Completer):
def __init__(self, empire_cli):
self.empire_cli = empire_cli
def get_completions(self, document, complete_event):
word_before_cursor = document.get_word_before_cursor(WORD=True)
try:
cmd_line = list(
map(lambda s: s.lower(), shlex.split(document.current_line))
)
if len(cmd_line) == 0:
cmd_line.append("")
except ValueError:
pass
else:
if not state.connected:
yield from self.empire_cli.menus["MainMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
# These commands should be accessible anywhere.
elif cmd_line[0] in ["uselistener"]:
yield from self.empire_cli.menus["UseListenerMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
elif cmd_line[0] in ["usestager"]:
yield from self.empire_cli.menus["UseStagerMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
elif cmd_line[0] in ["usemodule"]:
yield from self.empire_cli.menus["UseModuleMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
elif cmd_line[0] in ["interact"]:
yield from self.empire_cli.menus["InteractMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
elif cmd_line[0] in ["useplugin"]:
yield from self.empire_cli.menus["UsePluginMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
elif cmd_line[0] in ["usecredential"]:
yield from self.empire_cli.menus["UseCredentialMenu"].get_completions(
document, complete_event, cmd_line, word_before_cursor
)
else:
# Menu specific commands
yield from menu_state.current_menu.get_completions(
document, complete_event, cmd_line, word_before_cursor
)
class CliExitException(BaseException):
pass
class EmpireCli(object):
def __init__(self) -> None:
self.completer = MyCustomCompleter(self)
self.menus: Dict[str, Menu] = {
"MainMenu": main_menu,
"ListenerMenu": listener_menu,
"UseCredentialMenu": use_credential_menu,
"UseListenerMenu": use_listener_menu,
"EditListenerMenu": edit_listener_menu,
"UseStagerMenu": use_stager_menu,
"AgentMenu": agent_menu,
"UseModuleMenu": use_module_menu,
"InteractMenu": interact_menu,
"ShellMenu": shell_menu,
"CredentialMenu": credential_menu,
"PluginMenu": plugin_menu,
"UsePluginMenu": use_plugin_menu,
"AdminMenu": admin_menu,
"ChatMenu": chat_menu,
"SponsorsMenu": sponsors_menu,
"ProxyMenu": proxy_menu,
}
for menu in self.menus.values():
state.register_menu(menu)
@staticmethod
def strip(options):
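# Strips docopt punctuation from option keys; e.g. (illustrative values)
# {"<name>": "x", "--port": 80} becomes {"name": "x", "port": 80}.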
return {re.sub("[^A-Za-z0-9 _]+", "", k): v for k, v in options.items()}
@staticmethod
def get_autoconnect_server() -> Optional[str]:
"""
Looks for a server in the yaml marked for autoconnect.
If one is not found, returns None
:return: the name of the server to autoconnect
"""
servers = empire_config.yaml.get("servers", {})
autoserver = list(
filter(lambda x: x[1].get("autoconnect") is True, servers.items())
)
if len(autoserver) > 0:
return autoserver[0][0]
return None
@staticmethod
def update_in_bg(session: PromptSession):
while True:
time.sleep(2)
session.message = HTML(menu_state.current_menu.get_prompt())
session.app.invalidate()
def main(self):
if empire_config.yaml.get("suppress-self-cert-warning", True):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Create some history first. (Easy for testing.)
history = InMemoryHistory()
history.append_string("help")
history.append_string("uselistener http")
history.append_string("listeners")
history.append_string("main")
history.append_string("connect -c localhost")
print_util.loading()
print("\n")
print("Use the 'connect' command to connect to your Empire server.")
print(
"'connect -c localhost' will connect to a local empire instance with all the defaults"
)
print("including the default username and password.")
session = PromptSession(
key_bindings=bindings,
history=history,
completer=self.completer,
complete_in_thread=True,
bottom_toolbar=state.bottom_toolbar,
)
t = threading.Thread(target=self.update_in_bg, args=[session])
t.daemon = True
t.start()
menu_state.push(main_menu)
autoserver = self.get_autoconnect_server()
if autoserver:
print(
print_util.color(f"[*] Attempting to connect to server: {autoserver}")
)
self.menus["MainMenu"].connect(autoserver, config=True)
if args.resource:
with open(args.resource) as resource_file:
print(print_util.color(f"[*] Executing Resource File: {args.resource}"))
for cmd in resource_file:
with patch_stdout(raw=True):
try:
time.sleep(1)
text = session.prompt(
accept_default=True, default=cmd.strip()
)
cmd_line = list(shlex.split(text))
self.parse_command_line(text, cmd_line, resource_file=True)
except CliExitException:
return
except Exception as e:
print(
print_util.color(
f"[*] Error parsing resource command: {text} ({e})"
)
)
while True:
try:
with patch_stdout(raw=True):
text = session.prompt(
HTML(menu_state.current_menu.get_prompt()),
refresh_interval=None,
)
# cmd_line = list(map(lambda s: s.lower(), shlex.split(text)))
# TODO what to do about case sensitivity for parsing options.
cmd_line = list(shlex.split(text))
self.parse_command_line(text, cmd_line)
except KeyboardInterrupt:
print(print_util.color("[!] Type exit to quit"))
except ValueError as e:
print(print_util.color(f"[!] Error processing command: {e}"))
except EOFError:
break # Control-D pressed.
except CliExitException:
break
def parse_command_line(self, text: str, cmd_line: List[str], resource_file=False):
if len(cmd_line) == 0:
return
if not state.connected and not cmd_line[0] == "connect":
if cmd_line[0] == "exit":
choice = input(print_util.color("[>] Exit? [y/N] ", "red"))
if choice.lower() == "y":
raise CliExitException
else:
return
else:
return
# Switch Menus
if text.strip() == "main":
state.get_modules()
state.get_listeners()
print_util.title(
state.empire_version,
len(state.modules),
len(state.listeners),
len(state.get_active_agents()),
)
menu_state.push(self.menus["MainMenu"])
elif text.strip() == "listeners":
menu_state.push(self.menus["ListenerMenu"])
elif text.strip() == "chat":
menu_state.push(self.menus["ChatMenu"])
elif menu_state.current_menu_name == "ChatMenu":
menu_state.current_menu.send_chat(text)
elif text.strip() == "agents":
menu_state.push(self.menus["AgentMenu"])
elif text.strip() == "sponsors":
menu_state.push(self.menus["SponsorsMenu"])
elif text.strip() == "credentials":
menu_state.push(self.menus["CredentialMenu"])
elif text.strip() == "plugins":
menu_state.push(self.menus["PluginMenu"])
elif text.strip() == "admin":
menu_state.push(self.menus["AdminMenu"])
elif cmd_line[0] == "uselistener" and len(cmd_line) > 1:
if cmd_line[1] in state.listener_types:
menu_state.push(self.menus["UseListenerMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Listener not found: {cmd_line[1]}"))
elif cmd_line[0] == "usestager" and len(cmd_line) > 1:
if cmd_line[1] in state.stagers:
menu_state.push(self.menus["UseStagerMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Stager not found: {cmd_line[1]}"))
elif cmd_line[0] == "interact" and len(cmd_line) > 1:
if cmd_line[1] in state.agents:
menu_state.push(self.menus["InteractMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Agent not found: {cmd_line[1]}"))
elif cmd_line[0] == "useplugin" and len(cmd_line) > 1:
if cmd_line[1] in state.plugins:
menu_state.push(self.menus["UsePluginMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Plugin not found: {cmd_line[1]}"))
elif cmd_line[0] == "usecredential" and len(cmd_line) > 1:
if cmd_line[1] in state.credentials or cmd_line[1] == "add":
menu_state.push(self.menus["UseCredentialMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Credential not found: {cmd_line[1]}"))
elif cmd_line[0] == "usemodule" and len(cmd_line) > 1:
if cmd_line[1] in state.modules:
if menu_state.current_menu_name == "InteractMenu":
menu_state.push(
self.menus["UseModuleMenu"],
selected=cmd_line[1],
agent=menu_state.current_menu.selected,
)
else:
menu_state.push(self.menus["UseModuleMenu"], selected=cmd_line[1])
else:
print(print_util.color(f"[!] Module not found: {cmd_line[1]}"))
elif cmd_line[0] == "editlistener" and len(cmd_line) > 1:
if menu_state.current_menu_name == "ListenerMenu":
if cmd_line[1] in state.listeners:
menu_state.push(
self.menus["EditListenerMenu"], selected=cmd_line[1]
)
else:
print(print_util.color(f"[!] Listener not found: {cmd_line[1]}"))
elif text.strip() == "shell":
if menu_state.current_menu_name == "InteractMenu":
menu_state.push(
self.menus["ShellMenu"], selected=menu_state.current_menu.selected
)
else:
pass
elif menu_state.current_menu_name == "ShellMenu":
if text == "exit":
menu_state.push(
self.menus["InteractMenu"],
selected=menu_state.current_menu.selected,
)
else:
menu_state.current_menu.shell(menu_state.current_menu.selected, text)
elif text.strip() == "proxy":
if menu_state.current_menu_name == "InteractMenu":
if menu_state.current_menu.agent_options["language"] not in [
"python",
"ironpython",
]:
print(
print_util.color(
f'[!] Agent proxies are not available in {menu_state.current_menu.agent_options["language"]} agents'
)
)
pass
elif state.listeners[menu_state.current_menu.agent_options["listener"]][
"module"
] not in ["http", "http_hop", "redirector"]:
print(
print_util.color(
f"[!] Agent proxies are not available in {state.listeners[menu_state.current_menu.agent_options['listener']]['module']} listeners"
)
)
else:
menu_state.push(
self.menus["ProxyMenu"],
selected=menu_state.current_menu.selected,
)
else:
pass
elif text.strip() == "back":
menu_state.pop()
elif text.strip() == "exit":
if resource_file:
raise CliExitException
choice = input(print_util.color("[>] Exit? [y/N] ", "red"))
if choice.lower() == "y":
raise CliExitException
else:
pass
else:
func = None
try:
func = getattr(
menu_state.current_menu
if hasattr(menu_state.current_menu, cmd_line[0])
else self,
cmd_line[0],
)
except AttributeError:
pass
if func:
try:
# If the command is set, wrap the value in quotes so docopt
# doesn't interpret it as a parameter. Also concatenate all the words
# after the 3rd word for easier autofilling with suggested values that have spaces
# There may be a better way to do this.
if cmd_line[0] == "set":
cmd_line[2] = f'"{" ".join(cmd_line[2:])}"'
del cmd_line[3:]
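# e.g. ["set", "Name", "My", "Listener"] becomes
# ["set", "Name", '"My Listener"'] (illustrative values).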
args = self.strip(docopt(func.__doc__, argv=cmd_line[1:]))
new_args = {}
# todo casting for type hinted values?
for key, hint in get_type_hints(func).items():
# if args.get(key) is not None:
if key != "return":
new_args[key] = args[key]
# print(new_args)
func(**new_args)
except Exception as e:
print(e)
pass
except SystemExit as e:
pass
elif not func and menu_state.current_menu_name == "InteractMenu":
if cmd_line[0] in shortcut_handler.get_names(
self.menus["InteractMenu"].agent_language
):
menu_state.current_menu.execute_shortcut(cmd_line[0], cmd_line[1:])
def start():
try:
empire = EmpireCli()
empire.main()
finally:
state.shutdown()
|
graphicInterface.py
|
from tkinter import filedialog
from tkinter import *
import ctypes
import tkinter.ttk as ttk
import ProcessMusicTrack
from enum import Enum
import shutil
import threading
import os.path
import signal
import re  # needed for the filename regexes below
class ShowImageMode(Enum):
SPECTRUM = 'Spectrum'
PROBABILITY = 'Probability'
def get_screen_size():
user32 = ctypes.windll.user32
screen_size = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
return screen_size
def check_run_button():
if txt_first_track_name.get() != '' and txt_second_track_name.get() != '':
btn_run.configure(state=NORMAL)
else:
btn_run.configure(state=DISABLED)
def first_track_button_clicked():
global file_first_track
cpy_file_name = file_first_track
try:
file_first_track = filedialog.askopenfilename(
filetypes=(("Music files", "*.ogg"), ("Music files", "*.mp3"), ("Music files", "*.wav")))
track_name = re.search(r'[^/]*\.((wav)|(ogg)|(mp3))$', str(file_first_track)).group(0)
if str(cpy_file_name) != '':
last_track_name = re.search(r'[^/]*\.((wav)|(ogg)|(mp3))$', str(cpy_file_name)).group(0)
if os.path.exists('musics/' + str(last_track_name)[0:len(str(last_track_name)) - 4]):
shutil.rmtree(str(cpy_file_name)[0:len(str(cpy_file_name)) - 4])
txt_first_track_name.configure(state=NORMAL)
txt_first_track_name.delete(0, END)
txt_first_track_name.insert(0, track_name)
txt_first_track_name.configure(state=DISABLED)
check_run_button()
btn_show_details.configure(state=DISABLED)
label_result.config(text="Result:")
global created_folder_first_track
created_folder_first_track = False
print(txt_first_track_name.get())
except AttributeError:
print('not')
file_first_track = cpy_file_name
return
def second_track_button_clicked():
global file_second_track
cpy_file_name = file_second_track
try:
file_second_track = filedialog.askopenfilename(
filetypes=(("Music files", "*.ogg"), ("Music files", "*.mp3"), ("Music files", "*.wav")))
track_name = re.search(r'[^/]*\.((wav)|(ogg)|(mp3))$', str(file_second_track)).group(0)
if str(cpy_file_name) != '':
last_track_name = re.search(r'[^/]*\.((wav)|(ogg)|(mp3))$', str(cpy_file_name)).group(0)
if os.path.exists('musics/' + str(last_track_name)[0:len(str(last_track_name)) - 4]):
shutil.rmtree(str(cpy_file_name)[0:len(str(cpy_file_name)) - 4])
txt_second_track_name.configure(state=NORMAL)
txt_second_track_name.delete(0, END)
txt_second_track_name.insert(0, track_name)
txt_second_track_name.configure(state=DISABLED)
check_run_button()
btn_show_details.configure(state=DISABLED)
label_result.config(text="Result:")
global created_folder_second_track
created_folder_second_track = False
print(txt_second_track_name.get())
except AttributeError:
print('not')
file_second_track = cpy_file_name
return
def check_buttons(track, btn_prev, btn_next, index):
if index == 0:
btn_prev.configure(state=DISABLED)
else:
btn_prev.configure(state=NORMAL)
if selected_title_tab1.get() == 0:
if index == len(track.spectral_matrix) - 1:
btn_next.configure(state=DISABLED)
else:
btn_next.configure(state=NORMAL)
else:
if index == len(track.distribution_density_for_notes) - 1:
btn_next.configure(state=DISABLED)
else:
btn_next.configure(state=NORMAL)
def show_title_tab1():
if selected_title_tab1.get() == 0:
label_title_tab1.configure(text="Notes spectrum")
check_buttons(processed_first_track, btn_previous_tab1, btn_next_tab1, index_of_images[0][0])
index_spinbox_tab1.config(to_=len(processed_first_track.spectral_matrix) - 1)
change_spinbox(index_spinbox_tab1, index_of_images[0][0])
show_image(index_of_images[0][0], processed_first_track, label_image_tab1, index_spinbox_tab1, ShowImageMode.SPECTRUM.value)
else:
if selected_title_tab1.get() == 1:
label_title_tab1.configure(text="Probability distribution")
check_buttons(processed_first_track, btn_previous_tab1, btn_next_tab1, index_of_images[0][1])
index_spinbox_tab1.configure(to_=len(processed_first_track.distribution_density_for_notes) - 1)
change_spinbox(index_spinbox_tab1, index_of_images[0][1])
show_image(index_of_images[0][1], processed_first_track, label_image_tab1, index_spinbox_tab1, ShowImageMode.PROBABILITY.value)
def show_title_tab2():
if selected_title_tab2.get() == 0:
label_title_tab2.configure(text="Notes spectrum")
check_buttons(processed_second_track, btn_previous_tab2, btn_next_tab2, index_of_images[1][0])
index_spinbox_tab2.config(to_=len(processed_second_track.spectral_matrix) - 1)
change_spinbox(index_spinbox_tab2, index_of_images[1][0])
show_image(index_of_images[1][0], processed_second_track, label_image_tab2, index_spinbox_tab2,
ShowImageMode.SPECTRUM.value)
else:
if selected_title_tab2.get() == 1:
label_title_tab2.configure(text="Probability distribution")
check_buttons(processed_second_track, btn_previous_tab2, btn_next_tab2, index_of_images[1][1])
index_spinbox_tab2.config(to_=len(processed_second_track.distribution_density_for_notes) - 1)
change_spinbox(index_spinbox_tab2, index_of_images[1][1])
show_image(index_of_images[1][1], processed_second_track, label_image_tab2, index_spinbox_tab2,
ShowImageMode.PROBABILITY.value)
def read_image(index, track, show_image_mode):
image_file = ''
if show_image_mode == ShowImageMode.SPECTRUM.value:
image_file = track.output_graphic_of_spectral_matrix(index)
if show_image_mode == ShowImageMode.PROBABILITY.value:
image_file = track.output_graphic_of_probability_distribution(index)
return image_file
def change_spinbox(index_spinbox, value):
index_spinbox.delete(0, END)
index_spinbox.insert(0, value)
def show_image(index, track, label_image, index_spinbox, show_image_mode):
photoImage = PhotoImage(file=read_image(index, track, show_image_mode))
label_image.configure(image=photoImage)
# Keep a reference on the widget so Tkinter does not garbage-collect the image.
label_image.image = photoImage
def btn_next_tab1_clicked():
if selected_title_tab1.get() == 0:
len_images = len(processed_first_track.spectral_matrix)
else:
len_images = len(processed_first_track.distribution_density_for_notes)
show_next_image(processed_first_track, label_image_tab1, index_spinbox_tab1, btn_previous_tab1, btn_next_tab1,
selected_title_tab1, index_of_images[0], len_images)
def btn_prev_tab1_clicked():
if selected_title_tab1.get() == 0:
len_images = len(processed_first_track.spectral_matrix)
else:
len_images = len(processed_first_track.distribution_density_for_notes)
show_previous_image(processed_first_track, label_image_tab1, index_spinbox_tab1, btn_previous_tab1, btn_next_tab1,
selected_title_tab1, index_of_images[0], len_images)
def changed_spinbox_tab1():
if selected_title_tab1.get() == 0:
len_images = len(processed_first_track.spectral_matrix)
else:
len_images = len(processed_first_track.distribution_density_for_notes)
if int(index_spinbox_tab1.get()) >= len_images:
change_spinbox(index_spinbox_tab1, len_images - 1)
if int(index_spinbox_tab1.get()) < 0:
change_spinbox(index_spinbox_tab1, 0)
#print(index_spinbox_tab1.get())
change_image(processed_first_track, label_image_tab1, index_spinbox_tab1, btn_previous_tab1, btn_next_tab1,
selected_title_tab1, index_of_images[0], len_images)
def btn_next_tab2_clicked():
if selected_title_tab2.get() == 0:
len_images = len(processed_second_track.spectral_matrix)
else:
len_images = len(processed_second_track.distribution_density_for_notes)
show_next_image(processed_second_track, label_image_tab2, index_spinbox_tab2, btn_previous_tab2, btn_next_tab2,
selected_title_tab2, index_of_images[1], len_images)
def btn_prev_tab2_clicked():
if selected_title_tab2.get() == 0:
len_images = len(processed_second_track.spectral_matrix)
else:
len_images = len(processed_second_track.distribution_density_for_notes)
show_previous_image(processed_second_track, label_image_tab2, index_spinbox_tab2, btn_previous_tab2, btn_next_tab2,
selected_title_tab2, index_of_images[1], len_images)
def changed_spinbox_tab2():
if selected_title_tab2.get() == 0:
len_images = len(processed_second_track.spectral_matrix)
else:
len_images = len(processed_second_track.distribution_density_for_notes)
if int(index_spinbox_tab2.get()) >= len_images:
change_spinbox(index_spinbox_tab2, len_images - 1)
if int(index_spinbox_tab2.get()) < 0:
change_spinbox(index_spinbox_tab2, 0)
#print(index_spinbox_tab2.get())
change_image(processed_second_track, label_image_tab2, index_spinbox_tab2, btn_previous_tab2, btn_next_tab2,
selected_title_tab2, index_of_images[1], len_images)
def show_next_image(track, label_image, index_spinbox, btn_prev, btn_next, selected_title, image_index, len_images):
if selected_title.get() == 0 and image_index[0] < len_images-1:
if image_index[0] == 0:
btn_prev.configure(state=NORMAL)
image_index[0] += 1
change_spinbox(index_spinbox, image_index[0])
if image_index[0] == len_images-1:
btn_next.configure(state=DISABLED)
show_image(image_index[0], track, label_image, index_spinbox, ShowImageMode.SPECTRUM.value)
if selected_title.get() == 1 and image_index[1] < len_images-1:
if image_index[1] == 0:
btn_prev.configure(state=NORMAL)
image_index[1] += 1
change_spinbox(index_spinbox, image_index[1])
if image_index[1] == len_images-1:
btn_next.configure(state=DISABLED)
show_image(image_index[1], track, label_image, index_spinbox, ShowImageMode.PROBABILITY.value)
def show_previous_image(track, label_image, index_spinbox, btn_prev, btn_next, selected_title, image_index, len_images):
if selected_title.get() == 0 and image_index[0] > 0:
if image_index[0] == len_images-1:
btn_next.configure(state=NORMAL)
image_index[0] -= 1
change_spinbox(index_spinbox, image_index[0])
if image_index[0] == 0:
btn_prev.configure(state=DISABLED)
show_image(image_index[0], track, label_image, index_spinbox, ShowImageMode.SPECTRUM.value)
if selected_title.get() == 1 and image_index[1] > 0:
if image_index[1] == len_images-1:
btn_next.configure(state=NORMAL)
image_index[1] -= 1
change_spinbox(index_spinbox, image_index[1])
if image_index[1] == 0:
btn_prev.configure(state=DISABLED)
show_image(image_index[1], track, label_image, index_spinbox, ShowImageMode.PROBABILITY.value)
def change_image(track, label_image, index_spinbox, btn_prev, btn_next, selected_title, image_index, len_images):
if selected_title.get() == 0:
btn_prev.configure(state=NORMAL)
btn_next.configure(state=NORMAL)
image_index[0] = int(index_spinbox.get())
if int(index_spinbox.get()) == 0:
btn_prev.configure(state=DISABLED)
if int(index_spinbox.get()) == len_images-1:
btn_next.configure(state=DISABLED)
show_image(image_index[0], track, label_image, index_spinbox, ShowImageMode.SPECTRUM.value)
if selected_title.get() == 1:
btn_prev.configure(state=NORMAL)
btn_next.configure(state=NORMAL)
image_index[1] = int(index_spinbox.get())
if int(index_spinbox.get()) == 0:
btn_prev.configure(state=DISABLED)
if int(index_spinbox.get()) == len_images-1:
btn_next.configure(state=DISABLED)
show_image(image_index[1], track, label_image, index_spinbox, ShowImageMode.PROBABILITY.value)
def details_window_on_closing():
global btn_show_details_clicked
global details_window
btn_show_details_clicked -= 1
details_window.destroy()
def enter_clicked(event):
global tab_control
index_tab = tab_control.tabs().index(tab_control.select())
if index_tab == 0:
changed_spinbox_tab1()
else:
if index_tab == 1:
changed_spinbox_tab2()
def show_window_details():
global btn_show_details_clicked
btn_show_details_clicked += 1
if btn_show_details_clicked < 2:
global details_window
details_window = Toplevel(main_window)
details_window.iconbitmap('icons/main_icon.ico')
details_window.title("Details")
details_window.resizable(width=False, height=False)
details_window.bind('<Return>', enter_clicked)
details_window.protocol("WM_DELETE_WINDOW", details_window_on_closing)
global tab_control
tab_control = ttk.Notebook(details_window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text=processed_first_track.music_track.short_track_name)
tab_control.add(tab2, text=processed_second_track.music_track.short_track_name)
tab_control.add(tab3, text='Divergence')
global index_of_images
index_of_images = [[0, 0], [0, 0]]
label_choose_tab1 = Label(tab1, text="Choose what you want to see:")
label_choose_tab1.grid(column=0, row=1, sticky=W)
global selected_title_tab1
selected_title_tab1 = IntVar()
rad_notes_spectrum_tab1 = Radiobutton(tab1, text='Notes spectrum', value=0, variable=selected_title_tab1,
command=show_title_tab1)
rad_notes_spectrum_tab1.grid(column=1, row=1, columnspan=1)
rad_probability_distribution_tab1 = Radiobutton(tab1, text='Probability distribution', value=1,
variable=selected_title_tab1, command=show_title_tab1)
rad_probability_distribution_tab1.grid(column=2, row=1, columnspan=1)
global label_title_tab1
label_title_tab1 = Label(tab1, text="Notes spectrum", font=15)
label_title_tab1.grid(column=1, row=0, sticky=N + S + W + E, pady=(0, 10))
global btn_previous_tab1
btn_previous_tab1 = Button(tab1, text="Previous", width=20, command=btn_prev_tab1_clicked)
btn_previous_tab1.grid(column=0, row=3, pady=5)
btn_previous_tab1.configure(state=DISABLED)
global index_spinbox_tab1
index_spinbox_tab1 = Spinbox(tab1, from_=0, to_=len(processed_first_track.spectral_matrix) - 1,
command=changed_spinbox_tab1)
index_spinbox_tab1.grid(column=1, row=3, pady=5)
global btn_next_tab1
btn_next_tab1 = Button(tab1, text="Next", width=20, command=btn_next_tab1_clicked)
btn_next_tab1.grid(column=2, row=3, pady=5)
photoImage1 = PhotoImage(
file=read_image(index_of_images[0][0], processed_first_track, ShowImageMode.SPECTRUM.value))
global label_image_tab1
label_image_tab1 = Label(tab1, image=photoImage1)
label_image_tab1.grid(column=0, columnspan=3, row=2)
label_choose_tab2 = Label(tab2, text="Choose what you want to see:")
label_choose_tab2.grid(column=0, row=1, sticky=W)
global selected_title_tab2
selected_title_tab2 = IntVar()
rad_notes_spectrum_tab2 = Radiobutton(tab2, text='Notes spectrum', value=0, variable=selected_title_tab2,
command=show_title_tab2)
rad_notes_spectrum_tab2.grid(column=1, row=1, columnspan=1)
rad_probability_distribution_tab2 = Radiobutton(tab2, text='Probability distribution', value=1,
variable=selected_title_tab2, command=show_title_tab2)
rad_probability_distribution_tab2.grid(column=2, row=1, columnspan=1)
global label_title_tab2
label_title_tab2 = Label(tab2, text="Notes spectrum", font=15)
label_title_tab2.grid(column=1, row=0, sticky=N + S + W + E, pady=(0, 10))
global btn_previous_tab2
btn_previous_tab2 = Button(tab2, text="Previous", width=20, command=btn_prev_tab2_clicked)
btn_previous_tab2.grid(column=0, row=3, pady=5)
btn_previous_tab2.configure(state=DISABLED)
global index_spinbox_tab2
index_spinbox_tab2 = Spinbox(tab2, from_=0, to_=len(processed_second_track.spectral_matrix) - 1,
command=changed_spinbox_tab2)
index_spinbox_tab2.grid(column=1, row=3, pady=5)
global btn_next_tab2
btn_next_tab2 = Button(tab2, text="Next", width=20, command=btn_next_tab2_clicked)
btn_next_tab2.grid(column=2, row=3, pady=5)
photoImage2 = PhotoImage(
file=read_image(index_of_images[1][0], processed_second_track, ShowImageMode.SPECTRUM.value))
global label_image_tab2
label_image_tab2 = Label(tab2, image=photoImage2)
label_image_tab2.grid(column=0, columnspan=3, row=2)
photoImage3 = PhotoImage(
file=ProcessMusicTrack.ProcessMusicTrack.output_graphic_divergence_of_tracks(processed_first_track, divergence_vec,
processed_first_track.music_track.short_track_name,
processed_second_track.music_track.short_track_name))
label_image_tab3 = Label(tab3, image=photoImage3)
label_image_tab3.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
details_window.transient(main_window)
details_window.grab_set()
details_window.focus_set()
details_window.wait_window()
details_window.mainloop()
def modal_window_on_closing():
# Ignore close requests while processing runs; the window is dismissed with
# the Continue button instead.
print('')
def modal_window_continue_clicked():
global modal_window
modal_window.destroy()
def show_modal_window():
global modal_window
modal_window = Toplevel(main_window)
modal_window.iconbitmap('icons/main_icon.ico')
modal_window.title("Waiting window")
modal_window.resizable(width=False, height=False)
modal_window.protocol("WM_DELETE_WINDOW", modal_window_on_closing)
global image_warning
label_image_warning = Label(modal_window, image=image_warning)
label_image_warning.grid(column=0, row=0, pady=5, padx=5)
label_message = Label(modal_window, text="Start processing! Please, wait...")
label_message.grid(column=1, row=0, pady=5, padx=5)
global btn_continue
btn_continue = Button(modal_window, text="Continue", width=20, command=modal_window_continue_clicked)
btn_continue.grid(column=0, row=1, columnspan=2, pady=5)
btn_continue.configure(state=DISABLED)
modal_window.transient(main_window)
modal_window.grab_set()
modal_window.focus_set()
modal_window.wait_window()
modal_window.mainloop()
def start_process(spectrum, filter):
run_first_track(str(file_first_track), spectrum, filter)
run_second_track(str(file_second_track), spectrum, filter)
global created_folder_first_track
global created_folder_second_track
created_folder_first_track = True
created_folder_second_track = True
global divergence_vec
global processed_first_track
global processed_second_track
divergence_vec, divergence_mean, _ = ProcessMusicTrack.ProcessMusicTrack.get_divergence_of_tracks(
processed_first_track, processed_second_track)
label_result.config(text="Result: " + str(round(divergence_mean, 3)))
btn_show_details.configure(state=NORMAL)
global btn_continue
btn_continue.configure(state=NORMAL)
def run_clicked():
print('Run')
try:
spectrum = None
filter = None
print(selected_spectrum.get())
print(selected_filter.get())
if selected_spectrum.get() == 0:
spectrum = ProcessMusicTrack.SpectrumMode.TRANSPOSED_OCTAVE
else:
spectrum = ProcessMusicTrack.SpectrumMode.FULL_SPECTRUM
if selected_filter.get() == 0:
filter = ProcessMusicTrack.FilterMode.BY_COEFFICIENT
else:
filter = ProcessMusicTrack.FilterMode.BY_NUMBER
global created_folder_first_track
global created_folder_second_track
th = threading.Thread(target=start_process, args=(spectrum, filter,))
th.start()
show_modal_window()
except:
return
def run_first_track(track_1, spectrum, filter):
global processed_first_track
processed_first_track = ProcessMusicTrack.ProcessMusicTrack(track_1, spectrum, filter)
print(track_1)
processed_first_track.start_processing()
def run_second_track(track_2, spectrum, filter):
global processed_second_track
processed_second_track = ProcessMusicTrack.ProcessMusicTrack(track_2, spectrum, filter)
print(track_2)
processed_second_track.start_processing()
def change_rad_main():
btn_show_details.configure(state=DISABLED)
label_result.config(text="Result:")
def main_window_on_closing():
global created_folder_first_track
global created_folder_second_track
global file_first_track
print(str(file_first_track))
if str(file_first_track) != '' and created_folder_first_track:
shutil.rmtree(str(file_first_track)[0:len(str(file_first_track)) - 4])
global file_second_track
print(str(file_second_track))
if str(file_second_track) != '' and created_folder_second_track:
shutil.rmtree(str(file_second_track)[0:len(str(file_second_track)) - 4])
main_window.destroy()
os.kill(os.getpid(), signal.SIGTERM)
def show_main_window():
global main_window
main_window = Tk()
main_window.title("Similitary of musics tracks")
main_window.iconbitmap('icons/main_icon.ico')
main_window.resizable(width=False, height=False)
main_window.protocol("WM_DELETE_WINDOW", main_window_on_closing)
global processed_first_track
global processed_second_track
global file_first_track
file_first_track = ''
global file_second_track
file_second_track = ''
global image_warning
image_warning = PhotoImage(file='icons/warning.png')
global label_first_track
label_first_track = Label(main_window, text="Music track №1")
label_first_track.grid(column=0, row=0, pady=5, padx=5)
global txt_first_track_name
txt_first_track_name = Entry(main_window, state="normal")
txt_first_track_name.grid(column=1, row=0, columnspan=2, sticky=N+S+W+E, pady=5)
txt_first_track_name.configure(state=DISABLED)
global btn_choose_first_track
btn_choose_first_track = Button(main_window, text="Choose track №1", command=first_track_button_clicked)
btn_choose_first_track.grid(column=3, row=0, sticky=N+S+W+E, pady=5, padx=5)
global label_second_track_name
label_second_track_name = Label(main_window, text="Music track №2")
label_second_track_name.grid(column=0, row=1, pady=5, padx=5)
global txt_second_track_name
txt_second_track_name = Entry(main_window, width=10, state="normal")
txt_second_track_name.grid(column=1, row=1, columnspan=2, sticky=N+S+W+E, pady=5)
txt_second_track_name.configure(state=DISABLED)
global btn_choose_second_track
btn_choose_second_track = Button(main_window, text="Choose track №2", command=second_track_button_clicked)
btn_choose_second_track.grid(column=3, row=1, pady=5, padx=5)
global label_filter_radiobutton
label_filter_radiobutton = Label(main_window, text="Filtration method: ")
label_filter_radiobutton.grid(column=0, row=2, pady=(30,5), padx=5, sticky=W)
global selected_filter
selected_filter = IntVar()
print(selected_filter.get())
rad_filter_threshold = Radiobutton(main_window, text='Threshold filtering', value=0, variable=selected_filter, command=change_rad_main)
rad_filter_threshold.grid(column=1, row=2, columnspan=1, pady=(30,5), padx=5)
rad_filter_number = Radiobutton(main_window, text='Filtering by number', value=1, variable=selected_filter, command=change_rad_main)
rad_filter_number.grid(column=2, row=2, columnspan=1, pady=(30,5), padx=5)
global label_spectrum_radiobutton
label_spectrum_radiobutton = Label(main_window, text="Spectrum: ")
label_spectrum_radiobutton.grid(column=0, row=3, pady=5, padx=5, sticky=W)
global selected_spectrum
selected_spectrum = IntVar()
print(selected_spectrum.get())
rad_transposed = Radiobutton(main_window, text='Transposed octave', value=0, variable=selected_spectrum, command=change_rad_main)
rad_transposed.grid(column=1, row=3, columnspan=1, pady=5, padx=5)
rad_full_spectrum = Radiobutton(main_window, text='Full spectrum', value=1, variable=selected_spectrum, command=change_rad_main)
rad_full_spectrum.grid(column=2, row=3, columnspan=1, pady=5, padx=5)
global btn_run
btn_run = Button(main_window, text="Run", command=run_clicked)
btn_run.grid(column=3, row=4, sticky=N+S+W+E, pady=(30,5), padx=5)
btn_run.configure(state=DISABLED)
global label_result
label_result = Label(main_window, text="Result:")
label_result.grid(column=0, row=4, rowspan=2, sticky=W, pady=(30,5), padx=5)
global btn_show_details
global btn_show_details_clicked
btn_show_details_clicked = 0
btn_show_details = Button(main_window, text="Show details", command=show_window_details)
btn_show_details.grid(column=3, row=5, sticky=N+S+W+E, pady=5, padx=5)
btn_show_details.configure(state=DISABLED)
main_window.mainloop()
if __name__ == '__main__':
show_main_window()
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary is indexed by device name and
# the second holds lisp_interface() values indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time creates the g, p, and public-key values. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
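#
# Illustrative sketch of the layout described above (address and port are
# examples):
#
#   lisp_crypto_keys_by_rloc_encap["10.0.0.2:4341"] ->
#       [None, <key-state for key-id 1>, None, None]
#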
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, we write specific information to,
# say, a Broadcom chip that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key is
# the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Array to store the last LISP_FLOW_LOG_SIZE (currently 100) flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long word of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target they are requesting. The key is an EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
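#
# Illustrative sketch of the layout described above (prefix and address are
# examples):
#
#   lisp_pubsub_cache["[1000]10.1.0.0/16"] -> { "10.2.2.2" : <pubsub state> }
#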
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10 # In units of seconds
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is running on an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g. Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and we
# need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed.
#
def lprint(*args):
if (lisp_debug_logging == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args: print arg,
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
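#
# For example, lisp_hex_string(0xdead) returns "dead", and a 64-bit value with
# the high-order bit set, such as 0xffffffffffffffff, prints without the
# trailing "L" (illustrative values).
#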
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6 we already
# have an address format we can use. If the destination is IPv4 we need to
# put the address in the IPv4-mapped IPv6 format ("::ffff:<ipv4-addr>").
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
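#
# For example (illustrative addresses only): lisp_convert_4to6("10.1.1.1")
# stores "::ffff:10.1.1.1" in a 128-bit lisp_address(), while
# lisp_convert_6to4("::ffff:10.1.1.1") above returns the string "10.1.1.1".
# An address string that is already IPv6 is stored and returned as-is.
#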
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux, whose
# gethostbyname() does not resolve DNS names.
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
    # Make sure there are hex digits between dashes, otherwise this could be
    # a valid DNS name that happens to contain dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data):
if (len(data) < 20):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 40, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
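#
# As a quick sanity check of the folding above, a hypothetical running sum of
# 0x2bbcd (values chosen only for illustration) folds as follows:
#
#   checksum = 0x2bbcd
#   checksum = (checksum >> 16) + (checksum & 0xffff)   # 0xbbcf
#   checksum += checksum >> 16                          # no carry left
#   ~checksum & 0xffff                                  # 0x4430
#
# socket.htons() then puts the 16-bit result in network byte order before it
# is packed into bytes 10 and 11 of the header.
#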
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on the destination-MAC address of an incoming pcap'ed packet, index
# into lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If the dest-MAC is not us, look at the source MAC to see if we are in a
# loopback situation, testing an application and an xTR on the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
    #endfor
return(None)
#enddef
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
        # Ignore bogus interface names that containers may create. Allow
        # interface names with colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
        d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
    addr = ""
    out = out.split("\n")
    for line in out:
        a = line.split()[1]
        if (macos == False): a = a.split("/")[0]
        address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
        #
        # Favor a private address, per the comment above (this assumes the
        # lisp_address() class provides an is_private_address() predicate).
        #
        if (address.is_private_address()): return(address)
        addr = a
    #endfor
    return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
    # Check to see if we should not get the first address. Use an environment
    # variable (1-based addressing) to determine which one to get. If the
    # number of addresses is less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
self.hash_packet()
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
        # GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
        # AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the cipertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call the AES or chacha cipher. For AES-CBC, make sure the
        # ciphertext is a multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
        # Keep only the outer header, UDP header, and LISP header in
        # self.packet. We will append the plaintext in the caller once we
        # parse the inner packet length, so we can truncate any padding the
        # encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
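    #
    # A worked example of the offsets above (hypothetical sizes): a 2200-byte
    # inner packet is split into 1000-, 1000-, and 200-byte pieces. The
    # fragment-offset field is counted in 8-byte units, so the three
    # fragments carry 0x2000 (offset 0, MF set), 0x2000 + 125 = 0x207d, and
    # 250 = 0x00fa (last fragment, MF clear), each passed through
    # socket.htons() before being packed into the header.
    #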
def fragment(self):
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
        # IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order, so they have to be byte-swapped here. But when
        # testing, we (the UPC guys) discovered the frag field didn't need
        # swapping. The conclusion is that byte-swapping is necessary for
        # MacOS but not for Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
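    #
    # For example (little-endian host assumed): encode() packs the IPv4
    # total-length with socket.htons(), so a length of 84 sits in the header
    # as 0x00 0x54; on MacOS the swap above turns it back into 0x54 0x00 so
    # the kernel, which expects host byte order with IP_HDRINCL, reads 84.
    #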
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
        # If we are configured to do Nonce-Echoing, look up the outer source
        # RLOC to obtain the echo-nonce state where the nonce to echo is
        # stored.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
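    #
    # For example, the 6-byte string "\x00\x11\x22\x33\x44\x55" becomes
    # "\x11\x00\x33\x22\x55\x44": each 16-bit word has its bytes swapped.
    #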
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
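    #
    # The source port above always lands in 0xf000-0xffff (61440-65535).
    # For example, a folded hash value of 0x9c1 (hypothetical) gives
    # 0xf000 | 0x9c1 = 0xf9c1, i.e. UDP source port 63937.
    #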
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
        #endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
    #enddef
def is_trace(self):
return(self.inner_protocol == LISP_UDP_PROTOCOL and
self.inner_dport == LISP_TRACE_PORT)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
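#
# For example (hypothetical header): a first long-word of 0x88123456, after
# socket.ntohl(), has the N-bit (0x80000000) and I-bit (0x08000000) set, a
# nonce field of 0x123456, and K-bits of 0 (no encryption), as decoded by
# lisp_data_header() below.
#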
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
        # Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start echoing the nonce. Get a new nonce. If an echo-nonce is
        # stored, use the same nonce as last time regardless of whether we
        # received an echo response. Setting the high-order bit tells the
        # caller to set the E-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
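    #
    # For example (hypothetical nonce): when this returns 0x123456 with the
    # high-order bit set, i.e. 0x80123456, the caller knows it is a
    # request-nonce; lisp_data_header.is_request_nonce() tests that same
    # 0x80000000 bit.
    #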
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
        # If the last received echo was a while ago and a new request-nonce
        # was sent recently, say the echo happened so we can bootstrap a new
        # request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
        # This should only be used in a lab for debugging and never live,
        # since it's a security risk to expose the shared-key (even though
        # the entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
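    #
    # In other words, the 256-bit HMAC-SHA256 output is split in half: the
    # upper 128 bits become the encryption key and the lower 128 bits become
    # the ICV key, each rendered as a zero-filled hex string (32 characters,
    # or 40 for the ICV key when the truncated SHA-256 ICV is used instead
    # of poly1305).
    #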
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
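    #
    # Illustrative sketch (not used by this code): how encode_lcaf() packs the
    # public key. The normalized key is a hex string of key_len * 2 digits;
    # each 16-hex-digit slice is one 64-bit word that is byte-swapped before
    # being appended to the packet:
    #
    #   pub_key = "0123456789abcdef" * 4        # pretend 32-byte key
    #   for i in range(0, len(pub_key), 16):
    #       word = int(pub_key[i:i+16], 16)     # one 64-bit chunk
    #       # packet += struct.pack("Q", byte_swap_64(word))
    #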
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
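#
# Illustrative sketch (not used by this code): decoding the first byte of the
# fixed control header, matching lisp_control_header.decode() above. Example
# value only.
#
#   typeval = 0x13                     # first byte of a Map-Request
#   msg_type = typeval >> 4            # 1 == LISP_MAP_REQUEST
#   smr_bit = bool(typeval & 0x01)     # True
#   rloc_probe = bool(typeval & 0x02)  # True
#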
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is 1 of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
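    #
    # Summary of the flag bits OR'd into first_long by encode() above (values
    # taken from the code, listed here for readability):
    #
    #   0x08000000  P  proxy_reply_requested
    #   0x04000000  S  lisp_sec_present
    #   0x02000000  I  xtr_id_present
    #   0x00002000  e  encrypt-bit (encryption key-id in the 3 bits above it)
    #   0x00001000  F  map_register_refresh
    #   0x00000800  T  use_ttl_for_timeout
    #   0x00000400  R  merge_register_requested
    #   0x00000200  M  mobile_node
    #   0x00000100  N  map_notify_requested
    #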
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
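    #
    # Illustrative sketch (not used by this code): the xTR-ID/site-ID trailer
    # handled by encode_xtr_id() and decode_xtr_id() above. The 128-bit xtr_id
    # travels as two byte-swapped 64-bit halves, followed by the byte-swapped
    # 64-bit site-id:
    #
    #   xtr_id = 0x00112233445566778899aabbccddeeff
    #   upper = byte_swap_64(xtr_id >> 64)
    #   lower = byte_swap_64(xtr_id & 0xffffffffffffffff)
    #   # decode reverses the swaps and reassembles:
    #   #   (byte_swap_64(upper) << 64) | byte_swap_64(lower) == xtr_id
    #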
#endclass
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
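    #
    # Sketch of the authentication flow in encode() above: the hash is run
    # over the whole message with the auth field zeroed, then spliced back in
    # at its fixed offset:
    #
    #   packet = header + zeroed-auth-field + eid_records
    #   hashval = lisp_hash_me(packet, alg_id, password, False)
    #   packet = packet[0:offset] + hashval + packet[offset + auth_len:]
    #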
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA-based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig>" }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
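    #
    # Illustrative example (placeholder values): the JSON body produced by
    # sign_map_request() above. The signature is the base64 encoding of an
    # ecdsa signature over nonce + source-eid + target-eid:
    #
    #   { "source-eid" : "2001:db8::1", "signature-eid" : "2001:db8::2",
    #     "signature" : "<base64-signature>" }
    #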
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
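    #
    # Layout of first_long built by encode() above, sketched from the code.
    # The 5-bit ITR-RLOC count field sits at bit 8 (the decoder adds one to
    # it) and the record count occupies the low byte:
    #
    #   first_long = (LISP_MAP_REQUEST << 28) | (itr_rloc_count << 8) | \
    #       record_count
    #   # flag bits: A=0x08000000, D=0x04000000, R=0x02000000, S=0x01000000,
    #   #            P=0x00800000, I=0x00400000 (smr-invoked), M=0x00200000,
    #   #            X=0x00100000 (xtr-id), L=0x00004000, d=0x00002000
    #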
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
        # Get the AFI that follows the JSON string. We are expecting an AFI
        # of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the locator-set records are encrypted, in their entirety,
# with the chacha cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
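    #
    # Sketch of the TTL encoding handled by print_ttl() and store_ttl() above:
    # the TTL is in minutes unless the high-order bit is set, in which case
    # the low 31 bits are seconds.
    #
    #   record_ttl = 1440               # 1440 minutes; store_ttl() -> 86400
    #   record_ttl = 0x80000000 | 90    # high bit set; store_ttl() -> 90 secs
    #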
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
        # Check if we are encoding a geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
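    #
    # How the length fields relate in encode() above, for a control payload
    # of N bytes:
    #
    #   udp_length  = N + 8               # UDP header is 8 bytes
    #   IPv4 length = udp_length + 20     # total-length includes IPv4 header
    #   IPv6 length = udp_length          # payload-length excludes IPv6 header
    #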
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
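# An RLOC-record, as parsed/built by lisp_rloc_record() below, carries the
# unicast and multicast priority/weight, the L/P/R flag bits, the RLOC
# address itself, and any optional LCAF-encoded attributes (rloc-name, geo,
# elp, rle, json, security keys) handled by encode_lcaf()/decode_lcaf().
#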
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
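#
# lisp_ddt_node() describes one delegation within a lisp_ddt_entry(). A node
# with neither map_server_child nor map_server_peer set is a "ddt-child";
# otherwise print_node_type() reports "map-server-child" or "map-server-peer".
#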
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17,
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
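#
# lisp_nat_info() caches the translated address, hostname, and port learned
# from an Info-Request sender; timed_out() returns True once the entry is
# older than 2 * LISP_INFO_INTERVAL seconds. lisp_info_source() below keeps
# similar state, indexed both by address+hostname and by Info-Request nonce.
#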
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it if needed (x86), and convert it to a hex
# string zero-filled to full width so a longword that leads with 0 is not
# truncated; the pieces are concatenated into the authentication data.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
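#
# For example (illustrative values), lisp_open_listen_socket("0.0.0.0",
# "4342") returns an IPv4 UDP socket bound to port 4342, while
# lisp_open_listen_socket("", "lisp-itr") removes any stale "lisp-itr" file
# and returns an AF_UNIX datagram socket bound to that name.
#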
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
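#
# As an illustration (values are made up), a 100-byte control packet from
# source "lisp-core" with source port 4341 is wrapped as:
#
#   "packet@100@lisp-core@4341@<100 bytes of packet data>"
#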
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
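#
# As an illustration (values are made up), lisp_command_ipc("show map-cache",
# "lisp-itr") returns "command@14@lisp-itr@@show map-cache"; the empty field
# between the double @ is the null port.
#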
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 or 9000 bytes) since the socket interface
# will not support larger sends. And socket.setsockopt() won't allow us to
# increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
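#
# Note the retry behavior above: each failed sendto() doubles the sleep time,
# starting at 1 millisecond, and after 12 consecutive failures the peer is
# considered down and the remainder of the message is dropped.
#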
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
#endwhile
return(new)
#enddef
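#
# For example, a 6-byte packet "\x01\x02\x03\x04\x05\x06" is returned as
# "01020304 0506 " (4-byte groups of hex digits separated by whitespace).
#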
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Process 1500 byte segments if the received IPC packet is greater than what
# sockets can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length. We only have one
# segment which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, so the last
# partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
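#
# For example, lisp_bit_stuff(["a", "b"]) returns "a@b", restoring the 0x40
# bytes that a previous split("@") removed from the payload.
#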
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
#   "command" @ <total-length> @ <source> @ <port> @ <command-buffer>
#   "packet" @ <total-length> @ <source> @ <port> @ <packet-buffer>
#
# So when splitting on "@" does not yield at least 4 fields, we are receiving
# a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did it so it is assebled into a complete datagram
# in this sytem.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we will confuse the IPC separator from real data.
# So go to the payload and put in 0x40 where split() seperated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
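#
# The packet returned above is a Map-Reply header (record count 1) followed
# by one EID-record and one RLOC-record per entry in rloc_set. RLOC-records
# whose address is local to this system get the local bit, the probe bit
# (for RLOC-probe replies), and the security keys; when this system is an
# RTR, a priority-254 entry is also given the rloc-name "RTR".
#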
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and lisp_rtr_list.has_key(rtr)):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have the shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested, return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
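#
# In other words, the record count and nonce are lifted from the Map-Reply,
# the type nibble is rewritten to LISP_MAP_NOTIFY, and a zeroed 4-byte
# key-id/alg-id/auth-length field is inserted ahead of the unchanged
# EID-records.
#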
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usaully
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
    # If the request is for a non Crypto-EID, signatures are configured to
    # be required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
        # We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
        # Does policy say what the TTL should be, and whether we should drop
        # the Map-Request and return a negative Map-Reply?
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
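#
# example_encode_record_ttl
#
# Illustrative sketch of the force-ttl encoding used in the function above:
# EID-record TTLs are in minutes unless the high-order bit is set, in which
# case the value is interpreted as seconds. Example-only helper.
#
def example_encode_record_ttl(value, in_seconds):
    if (in_seconds): return(value | 0x80000000)
    return(value)
#enddef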
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
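#
# example_common_prefix_length
#
# Illustrative sketch of the XOR computation above, on plain integers: the
# index of the first differing bit equals the number of leading bits the two
# addresses share, which is the candidate negative-prefix mask-length. For
# example, example_common_prefix_length(0x0A010203, 0x0A010900) returns 20,
# i.e. 10.1.2.3 and 10.1.9.0 agree on a /20. Example-only helper.
#
def example_common_prefix_length(addr1, addr2, addr_bytes=4):
    diff_address = addr1 ^ addr2
    address_size = addr_bytes * 8
    for mask_len in range(address_size):
        bit_test = 1 << (address_size - mask_len - 1)
        if (diff_address & bit_test): return(mask_len)
    #endfor
    return(address_size)
#enddef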
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
    # Walk looking for the shortest prefix that does NOT match any site EIDs
    # configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
    # Walk looking for the shortest prefix that does NOT match any site EIDs
    # configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
    # record. Temporarily return the requested prefix until we have lisp_ms_compute_
# neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
    eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
    # Get the referral-node we sent the Map-Request to last time. We need
    # to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
    # replies to the DDT Map-Request have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
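#
# example_pick_referral
#
# Illustrative sketch of the selection rule above, on a plain list of
# (address, priority, updown) tuples: keep only up nodes at the numerically
# lowest priority, then pick one deterministically with a caller-supplied
# hash value (the real code hashes the source and destination EIDs). The
# tuple layout is invented for this example.
#
def example_pick_referral(nodes, hashval):
    best = []
    for addr, priority, updown in nodes:
        if (updown == False): continue
        if (len(best) == 0 or best[0][1] == priority):
            best.append((addr, priority))
        elif (best[0][1] > priority):
            best = [(addr, priority)]
        #endif
    #endfor
    if (len(best) == 0): return(None)
    return(best[hashval % len(best)][0])
#enddef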
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on a EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
    # The caller may want us to use the root instead of a best-match lookup.
    # We only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. We need to forward this
# Map-Request to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
        # cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
            # Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
        # be behind the same NAT as this ITR. This ITR, however, could be
        # behind another NAT or in public space. We want to mark the
        # private address RLOC unreachable for the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
                # An RTR should not put RTR RLOCs in its map-cache, but
                # xTRs do. Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
mc.map_cache_ttl = eid_record.store_ttl()
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents store in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
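#
# example_hmac_digest
#
# Illustrative sketch of the HMAC computation above: sha1 for the SHA-1-96
# alg-id, sha256 for the SHA-256-128 alg-id, hex output when the result is
# compared against the authentication data carried in the packet. Example-
# only helper, not used by the authentication path above.
#
def example_hmac_digest(packet, password, use_sha256, do_hex):
    hashalg = hashlib.sha256 if use_sha256 else hashlib.sha1
    h = hmac.new(password, packet, hashalg)
    return(h.hexdigest() if do_hex else h.digest())
#enddef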
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
    # Print differences if the hashes do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
    # Build RLOC-records for merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
key_id = key_id
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
    # Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
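#
# example_map_notify_queue_key
#
# Illustrative sketch of the retransmission-queue key built above: the
# Map-Notify nonce rendered in hex concatenated with the xTR address string.
# Example-only helper on plain values.
#
def example_map_notify_queue_key(nonce, address_string):
    return("%x" % nonce + address_string)
#enddef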
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xtR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when last RLE goes away. We will have
        # to search all individual registrations looking for RTRs.
        #
        # We store them in a dictionary so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = [] if len(sg_rloc_set) == 0 else \
sg_rloc_set[0].rle.rle_nodes
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
            # No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
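#
# example_low_bits_to_groups
#
# Illustrative sketch of the formatting loop above: take the low-order
# 'hash_bits' bits of a 128-bit integer and render them as colon-separated
# groups, 4 hex digits per full 16-bit group and 2 hex digits for a leading
# partial byte. For the 120-bit case in the comment above, the high-order
# byte of the IPv6 EID is dropped and the remaining 15 bytes form the
# EID-hash. Example-only helper.
#
def example_low_bits_to_groups(address, hash_bits):
    groups = []
    for i in range(hash_bits // 16):
        groups.insert(0, "%04x" % (address & 0xffff))
        address >>= 16
    #endfor
    if (hash_bits % 16 != 0):
        groups.insert(0, "%02x" % (address & 0xff))
    #endif
    return(":".join(groups))
#enddef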
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_id, pubkey, True/False]. Values can be of value None but last
# boolean argument is if the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
# EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
    # Lookup CGA hash in mapping database to get public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
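#
# example_sign_and_verify
#
# Illustrative round-trip with the ecdsa package showing the signer side the
# verification above expects: a NIST256p key and a sha256 hashfunc over the
# "[<iid>]<eid>" string. Key generation here is for the example only; a real
# signer would load its own private key and publish the matching public key.
#
def example_sign_and_verify(sig_data):
    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    signature = key.sign(sig_data, hashfunc=hashlib.sha256)
    verifier = key.get_verifying_key()
    return(verifier.verify(signature, sig_data, hashfunc=hashlib.sha256))
#enddef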
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the Map-Notify nonce and
# the xTR address.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
    # Parse the first 4 bytes, which are not encrypted. If the packet is not
    # encrypted, return to caller. If it is encrypted, get the 3-bit key-id
    # next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
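#
# example_parse_register_encryption_bits
#
# Illustrative sketch of the header parsing above: pull the e-bit and the
# 3-bit encryption key-id out of the first 32-bit word of a Map-Register.
# Returns an (e_bit, key_id) tuple. Example-only helper.
#
def example_parse_register_encryption_bits(packet):
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    e_bit = (header >> 13) & 0x1
    key_id = (header >> 14) & 0x7
    return(e_bit, key_id)
#enddef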
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
        # Allow overlapping ams-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
            lprint("  {} for EID {}{}".format(notfound, green(eid_str, False),
                ", matched non-ams {}".format(green(match_str, False)) if \
                match_str else ""))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set a flag to issue an
        # error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack. So don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False), rloc.rle.print_rle(False)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get the map-server so we can do statistics and find the auth-key, if
    # an auth-key was provided in the Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify_ack()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
    # Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
        # Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set dirty-bit so we can remove referral-nodes from the cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by using the group so we have to
# send to the loopback address to bootstrap our membership. We join to
# one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
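# For example, byte_swap_64(0x0102030405060708) returns 0x0807060504030201.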
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
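#
# As a worked example (assuming lisp_hex_string() returns bare hex digits),
# build_key() maps the EID-prefix [1000]10.0.0.0/8 to mask-length slot
# 8 + 48 = 56 with key "000003e8" + "0001" + "0a000000", i.e. the
# instance-ID, AFI, and address fields concatenated.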
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
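#
# For example, a lookup for destination [1000]10.2.2.2 returns the
# longest-match entry, say one for [1000]10.2.0.0/16. For a multicast
# destination like [1000]224.1.1.1, the group entry is found first and
# lookup_source_cache() then narrows it to the (S,G) entry.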
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
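        #
        # With no byte-order prefix, struct uses native sizes, so "L" can be
        # 8 bytes on 64-bit builds while "I" stays 4 bytes, which is what
        # this encoding relies on.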
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
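        # For example, "[1000]10.0.0.1" stores instance-id 1000 and leaves
        # "10.0.0.1" to be parsed below as the AFI based address.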
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
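        #
        # A geo string is 8 or 9 dash-separated fields, for example
        # "37-24-30-N-122-4-2-W" (degrees-minutes-seconds with hemisphere
        # letters at positions 3 and 7), optionally followed by an altitude
        # field and an optional "/mask-len".
        #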
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
#
# For the odd zero byte in front of "::", never print "xx00:: ...".
#
if (addr_str[2:6] == "00::"):
addr_str = addr_str[0:2] + addr_str[4::]
#endif
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
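    # For example, [1000]10.1.1.0/24 is more-specific than [1000]10.0.0.0/8,
    # while [2000]10.1.1.0/24 is not, since the instance-IDs differ.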
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
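#
# Decode the Instance-ID LCAF shown above. Returns the remainder of the
# packet on success or None if the encoding is malformed or truncated.
#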
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
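#
# Decode the Multicast Info LCAF shown above. Returns [packet, group] on
# success, with the source/subnet address stored in self, or [None, None]
# if the encoding is malformed or truncated.
#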
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate that there is enough length
# and enough bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and group address. Validate that there is enough length
# and enough bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
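#
# Peek at the LCAF type and dispatch to the Instance-ID, Multicast Info,
# or Geo-Coordinate decoder. Returns [packet, group] where 'group' is
# non-None only for the Multicast Info case.
#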
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.address.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
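#
# Select which ELP node to encapsulate to: the entry following the one
# that matches one of our own RLOCs, the first entry when none match (the
# ITR case), or None when we are the last hop of the path.
#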
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
# it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
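#
# Parse a geo string of the form:
#
#   [<iid>]<lat>-<mins>-<secs>-<N|S>-<long>-<mins>-<secs>-<E|W>[-<alt>][/<radius>]
#
# For example "37-25-19-N-122-5-6-W/10" is a geo-prefix with a 10 km
# radius whose coordinates dms_to_decimal() renders as roughly
# (37.42, -122.09). Internally, North latitudes and East longitudes are
# stored as negative values.
#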
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == None or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
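#
# Build the Geo-Coordinate LCAF encoding for this geo-point/prefix. Flag
# bits 0x40 and 0x20 indicate North latitude and East longitude, 0x10
# indicates an altitude is present, and the low-order flag bits indicate
# a radius is encoded.
#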
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = blue(rle_node.rloc_name, html) if \
rle_node.rloc_name != None else ""
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else "-" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
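#
# Return a human-readable count string, for example 1234567 becomes
# "1.2M" and 9876543210 becomes "9.87B".
#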
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and errors stats. Anytime a lisp_packet().
# packet_error value is added, this dictionary array needs to add the key
# string.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator (RLOC) record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state == LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state == LISP_RLOC_UP_STATE):
return("up-state")
if (self.state == LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state == LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
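#
# Copy the contents of a received RLOC-record into this lisp_rloc(). NAT
# state is consulted so a translated RLOC address and port are used when
# present, RLE/ELP/geo/json attributes are cached, and the encapsulation
# port to use for this RLOC is returned.
#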
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
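#
# Process a received RLOC-probe reply. Match the nonce to this RLOC (or
# to the per-next-hop RLOC that sent the probe), move the RLOC to
# up-state, record rtt and hop-count history, and when multiple next-hops
# exist, install a host route through the one with the best rtt.
#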
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl / 60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
return(elapsed >= self.map_cache_ttl)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply (it will never get).
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put each RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
# And if we removed an RLOC from the best list, we need to remove its
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
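#
# Hash the inner packet header to select an RLOC from the best-rloc set.
# Returns a 5-element array of [rloc-address, translated-port, nonce,
# native-forward-action, rle]; typically only one of the rloc, action, or
# rle elements is non-None.
#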
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None,self.action, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# IF this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachabile RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
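#
# Return the "P-S-I-T-R-M-N" registration flag string for this site-EID.
# In html mode each letter is wrapped with a hover description taken from
# the lisp_site_flags table above.
#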
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
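#
# Rebuild the merged RLOC-set for this site-EID from all individual ETR
# registrations that share the same site-id, removing duplicate RLOC
# addresses. If the merged set ends up empty, the EID is marked as no
# longer registered.
#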
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
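#
# Refresh the Map-Resolver address from its DNS name, at most once every
# 30 seconds. For a pull-based LISP-Decent DNS suffix, the A-record with
# index 0 creates or deletes peer lisp_mr() entries for the other
# A-records returned.
#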
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if number of A-records have changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If pull-based decent DNS suffix, then create other lisp_mr() for
# all A-records. Only have master to this (A-record index 0).
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
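#
# Note on lisp_mr bookkeeping: entries in lisp_map_resolvers_list are keyed
# by mr_name concatenated with map_resolver.print_address(), so two
# Map-Resolvers with the same address but different mr-names occupy distinct
# entries, and delete_mr() must rebuild the identical key to remove one.
#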
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If a pull-based LISP-Decent DNS suffix is configured, then create other
# lisp_ms() entries for all A-records. Only the master (A-record index 0)
# does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, tod = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" or month > "12"): return(False)
if (day < "01" or day > "31"): return(False)
hour, mi, sec = tod.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" or hour > "23"): return(False)
if (mi < "00" or mi > "59"): return(False)
if (sec < "00" or sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
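#
# Illustrative example for lisp_datetime(): the expected string format is
# "yyyy-mm-dd-hh:mm:ss". For a hypothetical value like "2016-07-04-13:45:00",
# parse_datetime() strips the "-" and ":" separators to get the comparable
# integer 20160704134500, so future(), past(), and now_in_range() reduce to
# simple integer comparisons against now().
#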
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
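#
# Note on lisp_pubsub bookkeeping: lisp_pubsub_cache is a two-level
# dictionary, keyed first by the printed EID-prefix and then by xtr-id, i.e.
# lisp_pubsub_cache[<printed EID-prefix>][<xtr-id>] holds the lisp_pubsub()
# entry stored by add() and removed by delete() above.
#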
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON Data ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
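# Illustrative sketch of the encoding produced by lisp_trace.encode() below,
# matching the diagram above (nonce and JSON values are hypothetical):
#
#   struct.pack("I", socket.htonl(0x90000000)) + struct.pack("Q", nonce) + \
#       json.dumps([{"example-key": "example-value"}])
#
# decode() reverses this, returning False if the first long-word or the
# length checks fail and True otherwise.
#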
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("I", first_long)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (socket.ntohl(first_long) != 0x90000000): return(False)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, rts_rloc, packet):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(packet, (rts_rloc, LISP_TRACE_PORT))
s.close()
#enddef
def packet_length(self):
udp = 8; trace = 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
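#
# Illustrative example (hypothetical suffix and index): if
# lisp_get_decent_index(eid) returns 13 and lisp_decent_dns_suffix is
# "pull.example.net", the name built above is "13.pull.example.net" and the
# least recently used lisp_mr() with that dns_name is returned.
#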
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
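# The fields touched below sit at fixed offsets in the 20-byte IPv4 header:
# TTL is byte 8 and the header checksum is bytes 10-11. The flow is: verify
# the received checksum (unless it is 0), discard the packet on TTL 0 or 1,
# otherwise decrement the TTL, zero the checksum field, and recompute it
# with lisp_ip_checksum() before returning the packet for encapsulation.
#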
def lisp_ipv4_input(packet):
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return(None)
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return(packet)
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port as 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
# Hold the request nonce so we can match replies from xTRs that have
# multiple RLOCs. The reason is that the reply's source address may not be
# the probed destination. And in our ETR implementation, the RLOC-probe
# request can be received by the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
if (nat_info and len(lisp_sockets) == 4):
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest,
nat_info, packet)
return
#endif
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so we don't have Info-Requests from the lisp-
# itr and lisp-etr process both add and delete host routes (for Info-
# Request sending purposes) at the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We will use
# the map-server host route as an IPC lock. For the data port, only
# the lisp-etr process will add a host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return an Info-Reply to the sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname in private-rloc in Info-Reply. Encode it as
# an AFI=17 distinguished-name.
#
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can process Map-Requests from them specially,
# proxying Map-Requests when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# whose signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return  # Early return disables the Map-Resolver test code below.
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer
# expiry. If the entry has not timed out, go through the RLOC-set to see if
# the encapsulation port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Check activity timers for encapsulation entries only.
#
if (mc.action == LISP_NO_ACTION):
now = lisp_get_timestamp()
if (mc.last_refresh_time + mc.map_cache_ttl > now):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from lisp_map_cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary is keyed by Info-Request hostname and
# each value is an array of lisp_nat_info() entries. We keep all the current
# and previous NAT state associated with the hostname so we can track how
# much movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# So this is not a refresh of the youngest entry. See if the same
# address/port pair exists as an older entry. Either way we prepend the new
# state; if an older matching entry exists, we remove it from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
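#
# Illustrative shape of the resulting state (hostname, address, and port are
# hypothetical):
#
#   lisp_nat_state_info["etr-home"] =
#       [lisp_nat_info("128.8.8.8", "etr-home", 16001), <older entries>]
#
# The youngest (current) translation is always element 0; older translations
# are kept behind it so movement can be observed.
#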
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4,
# IPv6, MAC, distinguished-name, geo-prefix, and E.164 address formats.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
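#
# Illustrative strings the checks above accept (values hypothetical):
# "'my-dist-name'" (distinguished-name in single quotes), "192.168.1.1"
# (IPv4), "0011-2233-4455" (MAC), "fd00::1" (IPv6),
# "45-50-30-N-121-30-00-W" (geo-prefix), and "+14085551212" (E.164).
# Anything else returns False.
#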
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
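#
# Illustrative API request strings handled above (parameter values are
# hypothetical):
#
#   "map-cache%"                                        - entire map-cache
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}'
#   'map-server%{"dns-name": "ms.example.net"}'
#   "database-mapping%"
#
# The text after "%" is either empty or a JSON dictionary of lookup
# parameters.
#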
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
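#
# Illustrative shape of one entry appended above (field values are
# hypothetical):
#
#   {"instance-id": "0", "eid-prefix": "10.0.0.0/8", "uptime": "...",
#    "expires": "...", "action": "...", "ttl": "15",
#    "rloc-set": [{"address": "192.168.1.2", "encap-port": "16001",
#                  "state": "...", "upriority": "1", "uweight": "50", ...}]}
#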
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver configuration to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return an array of configured database-mappings, including dynamic data
# like the translated_rloc.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. If so, parse the group-prefix for the
# (S,G) site lookup.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# an RLOC previously determined to be unreachable.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending an RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
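# The IPC payload is a simple "%"-delimited string. For example (with a
# hypothetical RTR address), a status change would be sent as:
#
#   rtr%203.0.113.7%up     or     rtr%203.0.113.7%down
#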
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR-list the lisp-itr process learned
# from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has gone {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_addr, source, port, nonce, hop_count,
ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc_addr.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}".format(probe,
red(map_reply_addr, False), red(source_addr, False)))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is the EID for this system.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (db.eid.is_exact_match(eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
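# For example, lisp_format_macs("0050569a0001", "0050569a0002") returns
# "0050-569a-0001 -> 0050-569a-0002".
#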
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow tuples whose last element
# is a lisp_packet. This function is called and run in its own thread and then
# exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from the VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
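# For example (hypothetical VTEP address): an RLOC of 10.11.12.13 maps to
# MAC 00:00:00:0b:0c:0d (Arista form 0000.000b.0c0d).
#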
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulated with a source-port of 4341
# and a destination adddress and port that was translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
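# For example (hypothetical values), a host with two default routes could
# return:
#
#   [ ["eth0", "192.168.1.1"], ["eth1", "192.168.2.1"] ]
#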
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
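# For example (hypothetical addresses), lisp_install_host_route("203.0.113.9",
# "192.168.1.1", True) runs "ip route add 203.0.113.9/32 via 192.168.1.1",
# and calling it with install=False runs the corresponding "ip route delete".
#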
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
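# As an illustrative example (hypothetical values), an entry with two RLOCs
# would be written as:
#
#   [1000]10.1.0.0/16 rloc 192.0.2.1 1 50, 192.0.2.2 1 50
#
# and an entry with no RLOCs and a native-forward action would be written as:
#
#   [1000]10.2.0.0/16 rloc native-forward
#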
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
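# A sketch of the JSON record produced for a unicast entry (hypothetical
# addresses, abbreviated):
#
#   { "type" : "map-cache", "opcode" : "add", "instance-id" : "1000",
#     "eid-prefix" : "10.1.0.0/16",
#     "rlocs" : [ { "rloc" : "203.0.113.7", "priority" : "1",
#                   "weight" : "100", "port" : "4341" } ] }
#
# Multicast entries carry an "rles" array instead of "rlocs", and encryption
# keys are added by lisp_build_json_keys() when configured.
#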
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
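# A sketch of the JSON record produced (hypothetical RLOC, truncated keys):
#
#   { "type" : "decap-keys", "rloc" : "203.0.113.7", "port" : "41000",
#     "keys" : [ { "key-id" : "1", "decrypt-key" : "<hex>",
#                  "icv-key" : "<hex>" } ] }
#
# The "port" key is only present when rloc_addr includes a port.
#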
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write database-mapping records to the
# ipc-data-plane socket.
#
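# A sketch of the two JSON records produced (hypothetical values):
#
#   { "type" : "database-mappings", "database-mappings" :
#     [ { "instance-id" : "1000", "eid-prefix" : "10.1.0.0/16" } ] }
#
#   { "type" : "etr-nat-port", "port" : 41000 }
#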
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write interface records to the ipc-data-plane
# socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} where the keys of the dictionary array are type
# integers and the values are type string.
#
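# For example, lisp_parse_auth_key("secret") returns {0: "secret"}, and
# lisp_parse_auth_key("[3]secret") returns {3: "secret"}.
#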
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialized list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue fragment if first fragment arrived and we determined its
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal, but this path is taken infrequently.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it received a JSON string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The JSON message types
# are "discovery", "restart", "statistics", and "decap-statistics". This
# function calls helper functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface, punt_socket)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(), and then walk the entries in each
# lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x instead. This is an expensive function, but it is
# rarely used, and once it has run for a particular addr_str it should not be
# needed again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
        # If the crypto-keys are the same, go find the next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
        # Try the ICV check. If it passes, switch to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if the configured LISP-Decent modulus is non-zero and a DNS
# suffix is configured, meaning we are using the LISP-Decent pull-based
# mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
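#
# Illustrative example (not part of the original code): with
# lisp_decent_modulus set to 4, an EID-prefix string such as
# "[1000]10.0.0.0/8" hashes to a SHA-256 value whose integer form modulo 4
# selects one of the indices 0-3, which lisp_get_decent_dns_name() then
# prepends to the configured suffix, e.g. "2.map.example.com" for a suffix
# of "map.example.com".
#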
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is passed as a string. Build an internal lisp_address() to
# pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
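# As built below, the JSON carried in the trace packet looks roughly like
# this (illustrative sketch, not a normative format):
#
# [ { "seid" : "<source-EID>", "deid" : "<dest-EID>", "paths" :
#     [ { "node" : "ITR"|"RTR"|"ETR", "srloc" : "<rloc>", "drloc" : "<rloc>",
#         "hostname" : "<name>",
#         "encap-timestamp" or "decap-timestamp" : <timestamp> }, ... ] },
#   ... ]
#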
def lisp_trace_append(packet, ed="encap"):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
entry["drloc"] = next_rloc
entry["hostname"] = lisp_hostname
key = ed + "-timestamp"
entry[key] = lisp_get_timestamp()
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for the record. If we are appending the first ITR node entry,
    # get its RLOC address in case we have to return the packet to the sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise we are forwarding a packet that is about to be encapsulated,
    # or we are forwarding a packet that was just decapsulated with the
    # addresses swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length. Zero UDP checksum.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
#
    # If we are swapping addresses, do it here so that the JSON append and
    # the IP header field changes are all reflected in the new IPv4 header
    # checksum.
#
if (swap):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[20::]
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else 40 + udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#------------------------------------------------------------------------------
|
picam.py
|
import logging
from io import BytesIO
import time
from PIL import Image
import threading
import queue
log = logging.getLogger(__name__)
picamera_override = None
class Picamera():
def __init__(self, image_format='jpeg', queue_max_size=10):
self.error = None
self.format = image_format
self.queue = queue.Queue(queue_max_size)
self._stop = threading.Event()
self.thread1 = threading.Thread(target=self.run, args=())
def __enter__(self):
self.start()
return self
    def __exit__(self, type, value, tb):
        self.stop()
        # Return a falsy value so exceptions raised inside the with-block are
        # not silently suppressed.
        return False
def start(self):
self._stop.clear()
self.thread1.start()
def has_failure(self):
return self.error is not None
def _get_camera(self):
if picamera_override is None:
try:
import picamera
return picamera.PiCamera()
except Exception as err:
log.warning("Error loading picamera module: %s" % err)
self.error = err
return None
else:
return picamera_override.PiCamera()
    def run(self):
        camera = self._get_camera()
        # _get_camera() returns None when the picamera module cannot be
        # loaded; bail out before entering the context manager.
        if camera is None or self.has_failure():
            return None
        with camera:
            log.debug("Started Picamera")
time.sleep(2)
stream = BytesIO()
for _ in camera.capture_continuous(stream, format=self.format):
if self._stop.is_set():
log.debug("Stop requested")
break
if not self.queue.full():
try:
self.queue.put(
Image.open(BytesIO(stream.getvalue())),
block=False
)
log.debug("Queued capture")
except queue.Full:
pass
except Exception as ex:
log.error("Failed to add to queue: %s" % ex)
stream.seek(0)
stream.truncate()
try:
stream.close()
except Exception as ex:
log.error("Failed to close stream: %s" % ex)
def acquire(self):
try:
# log.debug("queue len=%s" % self.queue.qsize())
return self.queue.get(block=False)
except queue.Empty:
return None
def stop(self):
self._stop.set()
self.queue = queue.Queue()
self.thread1.join()
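# Illustrative usage sketch (not part of the original module); assumes a
# Raspberry Pi with the picamera package installed, or a picamera_override
# supplied for testing:
#
#   with Picamera(image_format='jpeg') as cam:
#       frame = cam.acquire()   # PIL.Image.Image, or None if nothing queued
#       if frame is not None:
#           frame.save('frame.jpg')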
|
build_mscoco_data.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
from six.moves import xrange
import tensorflow as tf
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
"Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
"Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
"Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
"Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
"Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
"Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
"Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
"Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
"Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
"Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
"The minimum number of occurrences of each word in the "
"training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
"Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
# tf.flags.DEFINE_integer("num_threads", 1,
"Number of threads to preprocess the images.")
FLAGS = tf.flags.FLAGS
ImageMetadata = namedtuple("ImageMetadata",
["image_id", "filename", "captions"])
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self, vocab, unk_id):
"""Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word.
"""
self._vocab = vocab
self._unk_id = unk_id
def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id
class ImageDecoder(object):
"""Helper class for decoding images in TensorFlow."""
def __init__(self):
# Create a single TensorFlow Session for all image decoding calls.
self._sess = tf.Session()
# TensorFlow ops for JPEG decoding.
self._encoded_jpeg = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
def decode_jpeg(self, encoded_jpeg):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._encoded_jpeg: encoded_jpeg})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode("utf-8")]))
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_bytes_feature(v.encode("utf-8")) for v in values])
def _to_sequence_example(image, decoder, vocab):
"""Builds a SequenceExample proto for an image-caption pair.
Args:
image: An ImageMetadata object.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
Returns:
A SequenceExample proto.
"""
with tf.gfile.FastGFile(image.filename, "rb") as f:
encoded_image = f.read()
try:
decoder.decode_jpeg(encoded_image)
except (tf.errors.InvalidArgumentError, AssertionError):
print("Skipping file with invalid JPEG data: %s" % image.filename)
return
context = tf.train.Features(feature={
"image/image_id": _int64_feature(image.image_id),
"image/data": _bytes_feature(encoded_image),
})
assert len(image.captions) == 1
caption = image.captions[0]
caption_ids = [vocab.word_to_id(word) for word in caption]
feature_lists = tf.train.FeatureLists(feature_list={
"image/caption": _bytes_feature_list(caption),
"image/caption_ids": _int64_feature_list(caption_ids)
})
sequence_example = tf.train.SequenceExample(
context=context, feature_lists=feature_lists)
return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
num_shards):
"""Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Each thread produces N shards where N = num_shards / num_threads. For
# instance, if num_shards = 128, and num_threads = 2, then the first thread
# would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in images_in_shard:
image = images[i]
sequence_example = _to_sequence_example(image, decoder, vocab)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print("%s [thread %d]: Processed %d of %d items in thread batch." %
(datetime.now(), thread_index, counter, num_images_in_thread))
sys.stdout.flush()
writer.close()
print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Break up each image into a separate entity for each caption.
images = [ImageMetadata(image.image_id, image.filename, [caption])
for image in images for caption in image.captions]
# Shuffle the ordering of images. Make the randomization repeatable.
random.seed(12345)
random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder()
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in xrange(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name))
def _create_vocab(captions):
"""Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object.
"""
print("Creating vocabulary.")
counter = Counter()
for c in captions:
counter.update(c)
print("Total words:", len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = Vocabulary(vocab_dict, unk_id)
return vocab
def _process_caption(caption):
"""Processes a caption string into a list of tonenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption.
"""
tokenized_caption = [FLAGS.start_word]
tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
tokenized_caption.append(FLAGS.end_word)
return tokenized_caption
def _load_and_process_metadata(captions_file, image_dir):
"""Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: JSON file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata.
"""
with tf.gfile.FastGFile(captions_file, "r") as f:
caption_data = json.load(f)
# Extract the filenames.
id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
# Extract the captions. Each image_id is associated with multiple captions.
id_to_captions = {}
for annotation in caption_data["annotations"]:
image_id = annotation["image_id"]
caption = annotation["caption"]
id_to_captions.setdefault(image_id, [])
id_to_captions[image_id].append(caption)
assert len(id_to_filename) == len(id_to_captions)
assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
print("Loaded caption metadata for %d images from %s" %
(len(id_to_filename), captions_file))
# Process the captions and combine the data into a list of ImageMetadata.
print("Processing captions.")
image_metadata = []
num_captions = 0
for image_id, base_filename in id_to_filename:
filename = os.path.join(image_dir, base_filename)
captions = [_process_caption(c) for c in id_to_captions[image_id]]
image_metadata.append(ImageMetadata(image_id, filename, captions))
num_captions += len(captions)
print("Finished processing %d captions for %d images in %s" %
(num_captions, len(id_to_filename), captions_file))
return image_metadata
def main(unused_argv):
def _is_valid_num_shards(num_shards):
"""Returns True if num_shards is compatible with FLAGS.num_threads."""
return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
assert _is_valid_num_shards(FLAGS.train_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
assert _is_valid_num_shards(FLAGS.val_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
assert _is_valid_num_shards(FLAGS.test_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load image metadata from caption files.
mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
FLAGS.train_image_dir)
mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
FLAGS.val_image_dir)
# Redistribute the MSCOCO data as follows:
# train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
# val_dataset = 5% of mscoco_val_dataset (for validation during training).
# test_dataset = 10% of mscoco_val_dataset (for final evaluation).
train_cutoff = int(0.85 * len(mscoco_val_dataset))
val_cutoff = int(0.90 * len(mscoco_val_dataset))
train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
test_dataset = mscoco_val_dataset[val_cutoff:]
# Create vocabulary from the training captions.
train_captions = [c for image in train_dataset for c in image.captions]
vocab = _create_vocab(train_captions)
_process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
_process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
_process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
if __name__ == "__main__":
tf.app.run()
|
server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import socket
import ssl
import mimetypes
import webbrowser
import struct
import base64
import hashlib
import sys
import threading
import signal
import time
import os
import re
try:
from urllib import unquote
from urllib import quote
from urlparse import urlparse
from urlparse import parse_qs
except ImportError:
from urllib.parse import unquote
from urllib.parse import quote
from urllib.parse import unquote_to_bytes
from urllib.parse import urlparse
from urllib.parse import parse_qs
import cgi
import weakref
clients = {}
runtimeInstances = weakref.WeakValueDictionary()
pyLessThan3 = sys.version_info < (3,)
_MSG_ACK = '3'
_MSG_JS = '2'
_MSG_UPDATE = '1'
def to_websocket(data):
    # encoding and decoding utility function
if pyLessThan3:
return quote(data)
return quote(data, encoding='utf-8')
def from_websocket(data):
# encoding end decoding utility function
if pyLessThan3:
return unquote(data)
return unquote(data, encoding='utf-8')
def encode_text(data):
if not pyLessThan3:
return data.encode('utf-8')
return data
def get_method_by_name(root_node, name):
val = None
if hasattr(root_node, name):
val = getattr(root_node, name)
return val
def get_method_by_id(_id):
global runtimeInstances
return runtimeInstances.get(str(_id), None)
def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except:
pass
return session_value
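# Illustrative example (not part of the original module):
#   parse_session_cookie("remi_session=1589") returns the integer 1589;
#   a cookie header without a 'remi_session' token returns None.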
class WebSocketsHandler(socketserver.StreamRequestHandler):
magic = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def __init__(self, headers, *args, **kwargs):
self.headers = headers
self.handshake_done = False
self._log = logging.getLogger('remi.server.ws')
socketserver.StreamRequestHandler.__init__(self, *args, **kwargs)
def setup(self):
socketserver.StreamRequestHandler.setup(self)
self._log.info('connection established: %r' % (self.client_address,))
self.handshake_done = False
def handle(self):
global clients
self._log.debug('handle')
        # On some systems (e.g. ROS) the default socket timeout is shorter
        # than expected; force it to infinite (None), the normal socket
        # default.
self.request.settimeout(None)
if self.handshake():
while True:
if not self.read_next_message():
clients[self.session].websockets.remove(self)
self.handshake_done = False
self._log.debug('ws ending websocket service')
break
@staticmethod
def bytetonum(b):
if pyLessThan3:
b = ord(b)
return b
def read_next_message(self):
# noinspection PyBroadException
try:
try:
length = self.rfile.read(2)
except ValueError:
# socket was closed, just return without errors
return False
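            # WebSocket framing (RFC 6455): the low 7 bits of the second
            # header byte give the payload length; values 126 and 127 mean
            # the real length follows as a 16-bit or 64-bit field, and
            # client-to-server payloads are XOR-masked with a 4-byte key.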
length = self.bytetonum(length[1]) & 127
if length == 126:
length = struct.unpack('>H', self.rfile.read(2))[0]
elif length == 127:
length = struct.unpack('>Q', self.rfile.read(8))[0]
masks = [self.bytetonum(byte) for byte in self.rfile.read(4)]
decoded = ''
for char in self.rfile.read(length):
decoded += chr(self.bytetonum(char) ^ masks[len(decoded) % 4])
self.on_message(from_websocket(decoded))
except socket.timeout:
return False
except Exception:
return False
return True
def send_message(self, message):
if not self.handshake_done:
self._log.warning("ignoring message %s (handshake not done)" % message[:10])
return
self._log.debug('send_message: %s... -> %s' % (message[:10], self.client_address))
out = bytearray()
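        # 0x81 = FIN bit set + text-frame opcode; lengths up to 125 fit in
        # one byte, 126 introduces a 16-bit length, 127 a 64-bit length
        # (RFC 6455). Server-to-client frames are not masked.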
out.append(129)
length = len(message)
if length <= 125:
out.append(length)
elif 126 <= length <= 65535:
out.append(126)
out += struct.pack('>H', length)
else:
out.append(127)
out += struct.pack('>Q', length)
if not pyLessThan3:
message = message.encode('utf-8')
out = out + message
self.request.send(out)
def handshake(self):
self._log.debug('handshake')
key = self.headers['Sec-WebSocket-Key']
self.session = None
if 'cookie' in self.headers:
if self.headers['cookie']!=None:
self.session = parse_session_cookie(self.headers['cookie'])
if self.session == None:
return False
if not self.session in clients.keys():
return False
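        # Sec-WebSocket-Accept is base64(SHA-1(client key + fixed GUID)) as
        # required by RFC 6455.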
digest = hashlib.sha1((key.encode("utf-8")+self.magic))
digest = digest.digest()
digest = base64.b64encode(digest)
response = 'HTTP/1.1 101 Switching Protocols\r\n'
response += 'Upgrade: websocket\r\n'
response += 'Connection: Upgrade\r\n'
response += 'Sec-WebSocket-Accept: %s\r\n\r\n' % digest.decode("utf-8")
self._log.info('handshake complete')
self.request.sendall(response.encode("utf-8"))
self.handshake_done = True
        # If an update happens between the websocket connection and its
        # handshake, it does not get displayed. Inform the App that the
        # handshake is done so it can push a full refresh.
clients[self.session].websocket_handshake_done(self)
return True
def on_message(self, message):
global runtimeInstances
self.send_message(_MSG_ACK)
with clients[self.session].update_lock:
# noinspection PyBroadException
try:
# saving the websocket in order to update the client
if self not in clients[self.session].websockets:
clients[self.session].websockets.append(self)
# parsing messages
chunks = message.split('/')
self._log.debug('on_message: %s' % chunks[0])
if len(chunks) > 3: # msgtype,widget,function,params
# if this is a callback
msg_type = 'callback'
if chunks[0] == msg_type:
widget_id = chunks[1]
function_name = chunks[2]
params = message[
len(msg_type) + len(widget_id) + len(function_name) + 3:]
param_dict = parse_parametrs(params)
callback = get_method_by_name(runtimeInstances[widget_id], function_name)
if callback is not None:
callback(**param_dict)
except Exception:
self._log.error('error parsing websocket', exc_info=True)
def close(self, terminate_server=True):
try:
self.request.shutdown(socket.SHUT_WR)
self.finish()
if terminate_server:
self.server.shutdown()
except:
self._log.error("exception in WebSocketsHandler.close method", exc_info=True)
def parse_parametrs(p):
"""
Parses the parameters given from POST or websocket reqs
expecting the parameters as: "11|par1='asd'|6|par2=1"
returns a dict like {par1:'asd',par2:1}
"""
ret = {}
while len(p) > 1 and p.count('|') > 0:
s = p.split('|')
l = int(s[0]) # length of param field
if l > 0:
p = p[len(s[0]) + 1:]
field_name = p.split('|')[0].split('=')[0]
field_value = p[len(field_name) + 1:l]
p = p[l + 1:]
ret[field_name] = field_value
return ret
# noinspection PyPep8Naming
class App(BaseHTTPRequestHandler, object):
"""
    This class handles any incoming request from the browser.
The main application class can subclass this
In the do_POST and do_GET methods it is expected to receive requests such as:
- function calls with parameters
- file requests
"""
re_static_file = re.compile(r"^([\/]*[\w\d]+:[-_. $@?#£'%=()\/\[\]!+°§^,\w\d]+)") #https://regex101.com/r/uK1sX1/6
re_attr_call = re.compile(r"^/*(\w+)\/(\w+)\?{0,1}(\w*\={1}(\w|\.)+\&{0,1})*$")
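    # Illustrative examples of the two URL forms (not from the original
    # source): a static-file request such as "/res:style.css" matches
    # re_static_file, while a widget method call such as
    # "/108234/get_image_data?update_index=1" matches re_attr_call.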
def __init__(self, request, client_address, server, **app_args):
self._app_args = app_args
self.root = None
self._log = logging.getLogger('remi.request')
super(App, self).__init__(request, client_address, server)
def _get_list_from_app_args(self, name):
try:
v = self._app_args[name]
if isinstance(v, (tuple, list)):
vals = v
else:
vals = [v]
except KeyError:
vals = []
return vals
def _instance(self):
        """
        This method is used to get the Application instance previously
        created. Depending on this, it is possible to switch between a
        "single instance for multiple clients" and a "multiple instances
        for multiple clients" execution model.
        """
        global clients
        global runtimeInstances
self.session = 0
#checking previously defined session
if 'cookie' in self.headers:
self.session = parse_session_cookie(self.headers['cookie'])
#if not a valid session id
if self.session == None:
self.session = 0
if not self.session in clients.keys():
self.session = 0
#if no session id
if self.session == 0:
if self.server.multiple_instance:
self.session = int(time.time()*1000)
#send session to browser
del self.headers['cookie']
#if the client instance doesn't exist
if not(self.session in clients):
self.update_interval = self.server.update_interval
from remi import gui
head = gui.HEAD(self.server.title)
# use the default css, but append a version based on its hash, to stop browser caching
head.add_child('internal_css', "<link href='/res:style.css' rel='stylesheet' />\n")
body = gui.BODY()
body.onload.connect(self.onload)
body.onerror.connect(self.onerror)
body.ononline.connect(self.ononline)
body.onpagehide.connect(self.onpagehide)
body.onpageshow.connect(self.onpageshow)
body.onresize.connect(self.onresize)
self.page = gui.HTML()
self.page.add_child('head', head)
self.page.add_child('body', body)
if not hasattr(self, 'websockets'):
self.websockets = []
self.update_lock = threading.RLock()
if not hasattr(self, '_need_update_flag'):
self._need_update_flag = False
self._stop_update_flag = False
if self.update_interval > 0:
self._update_thread = threading.Thread(target=self._idle_loop)
self._update_thread.setDaemon(True)
self._update_thread.start()
runtimeInstances[str(id(self))] = self
clients[self.session] = self
else:
#restore instance attributes
client = clients[self.session]
self.websockets = client.websockets
self.page = client.page
self.update_lock = client.update_lock
self.update_interval = client.update_interval
self._need_update_flag = client._need_update_flag
if hasattr(client, '_update_thread'):
self._update_thread = client._update_thread
net_interface_ip = self.headers.get('Host', "%s:%s"%(self.connection.getsockname()[0],self.server.server_address[1]))
websocket_timeout_timer_ms = str(self.server.websocket_timeout_timer_ms)
pending_messages_queue_length = str(self.server.pending_messages_queue_length)
self.page.children['head'].set_internal_js(net_interface_ip, pending_messages_queue_length, websocket_timeout_timer_ms)
def main(self, *_):
""" Subclasses of App class *must* declare a main function
that will be the entry point of the application.
Inside the main function you have to declare the GUI structure
and return the root widget. """
raise NotImplementedError("Applications must implement 'main()' function.")
def _idle_loop(self):
""" This is used to exec the idle function in a safe context and a separate thread
"""
while not self._stop_update_flag:
time.sleep(self.update_interval)
with self.update_lock:
try:
self.idle()
except:
self._log.error("exception in App.idle method", exc_info=True)
if self._need_update_flag:
try:
self.do_gui_update()
except:
self._log.error('''exception during gui update. It is advisable to
use App.update_lock using external threads.''', exc_info=True)
def idle(self):
""" Idle function called every UPDATE_INTERVAL before the gui update.
Useful to schedule tasks. """
pass
def _need_update(self, emitter=None):
if self.update_interval == 0:
            # no interval, immediate update
self.do_gui_update()
else:
#will be updated after idle loop
self._need_update_flag = True
def do_gui_update(self):
""" This method gets called also by Timer, a new thread, and so needs to lock the update
"""
with self.update_lock:
changed_widget_dict = {}
self.root.repr(changed_widget_dict)
for widget in changed_widget_dict.keys():
html = changed_widget_dict[widget]
__id = str(widget.identifier)
self._send_spontaneous_websocket_message(_MSG_UPDATE + __id + ',' + to_websocket(html))
self._need_update_flag = False
def websocket_handshake_done(self, ws_instance_to_update):
with self.update_lock:
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
ws_instance_to_update.send_message(msg)
def set_root_widget(self, widget):
self.page.children['body'].append(widget, 'root')
self.root = widget
self.root.disable_refresh()
self.root.attributes['data-parent-widget'] = str(id(self))
self.root._parent = self
self.root.enable_refresh()
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
self._send_spontaneous_websocket_message(msg)
def _send_spontaneous_websocket_message(self, message):
for ws in self.websockets[:]:
# noinspection PyBroadException
try:
#self._log.debug("sending websocket spontaneous message")
ws.send_message(message)
except:
self._log.error("sending websocket spontaneous message", exc_info=True)
try:
self.websockets.remove(ws)
ws.close(terminate_server=False)
except:
self._log.error("unable to remove websocket client - already not in list", exc_info=True)
def execute_javascript(self, code):
self._send_spontaneous_websocket_message(_MSG_JS + code)
def notification_message(self, title, content, icon=""):
"""This function sends "javascript" message to the client, that executes its content.
In this particular code, a notification message is shown
"""
code = """
var options = {
body: "%(content)s",
icon: "%(icon)s"
}
if (!("Notification" in window)) {
alert("%(content)s");
}else if (Notification.permission === "granted") {
var notification = new Notification("%(title)s", options);
}else if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
var notification = new Notification("%(title)s", options);
}
});
}
""" % {'title': title, 'content': content, 'icon': icon}
self.execute_javascript(code)
def do_POST(self):
self._instance()
file_data = None
# listener_widget = None
# listener_function = None
try:
# Parse the form data posted
filename = self.headers['filename']
listener_widget = runtimeInstances[self.headers['listener']]
listener_function = self.headers['listener_function']
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
# Echo back information about what was posted in the form
for field in form.keys():
field_item = form[field]
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
self._log.debug('post: uploaded %s as "%s" (%d bytes)\n' % (field, field_item.filename, file_len))
get_method_by_name(listener_widget, listener_function)(file_data, filename)
else:
# Regular form value
self._log.debug('post: %s=%s\n' % (field, form[field].value))
if file_data is not None:
# the filedata is sent to the listener
self._log.debug('GUI - server.py do_POST: fileupload name= %s' % (filename))
self.send_response(200)
except Exception:
self._log.error('post: failed', exc_info=True)
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_HEAD(self):
self.send_response(200)
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Protected\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
# check here request header to identify the type of req, if http or ws
# if this is a ws req, instance a ws handler, add it to App's ws list, return
if "Upgrade" in self.headers:
if self.headers['Upgrade'] == 'websocket':
                # Pass arguments to the websocket handler, otherwise it will
                # lose the last message and be unable to handshake.
ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
return
"""Handler for the GET requests."""
do_process = False
if self.server.auth is None:
do_process = True
else:
if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
self._log.info("Authenticating")
self.do_AUTHHEAD()
self.wfile.write(encode_text('no auth header received'))
elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
do_process = True
else:
self.do_AUTHHEAD()
self.wfile.write(encode_text(self.headers['Authorization']))
self.wfile.write(encode_text('not authenticated'))
if do_process:
path = str(unquote(self.path))
# noinspection PyBroadException
try:
self._instance()
# build the page (call main()) in user code, if not built yet
with self.update_lock:
# build the root page once if necessary
if not 'root' in self.page.children['body'].children.keys():
self._log.info('built UI (path=%s)' % path)
self.set_root_widget(self.main(*self.server.userdata))
self._process_all(path)
except:
self._log.error('error processing GET request', exc_info=True)
def _get_static_file(self, filename):
filename = filename.replace("..", "") #avoid backdirs
__i = filename.find(':')
if __i < 0:
return None
key = filename[:__i]
path = filename[__i+1:]
key = key.replace("/","")
paths = {'res': os.path.join(os.path.dirname(__file__), "res")}
static_paths = self._app_args.get('static_file_path', {})
if not type(static_paths)==dict:
self._log.error("App's parameter static_file_path must be a Dictionary.", exc_info=False)
static_paths = {}
paths.update(static_paths)
if not key in paths:
return None
return os.path.join(paths[key], path)
def _process_all(self, func):
self._log.debug('get: %s' % func)
static_file = self.re_static_file.match(func)
attr_call = self.re_attr_call.match(func)
if (func == '/') or (not func):
self.send_response(200)
self.send_header("Set-Cookie", "remi_session=%s"%(self.session))
self.send_header('Content-type', 'text/html')
self.end_headers()
with self.update_lock:
# render the HTML
page_content = self.page.repr()
self.wfile.write(encode_text("<!DOCTYPE html>\n"))
self.wfile.write(encode_text(page_content))
elif static_file:
filename = self._get_static_file(static_file.groups()[0])
if not filename:
self.send_response(404)
return
mimetype, encoding = mimetypes.guess_type(filename)
self.send_response(200)
self.send_header('Content-type', mimetype if mimetype else 'application/octet-stream')
if self.server.enable_file_cache:
self.send_header('Cache-Control', 'public, max-age=86400')
self.end_headers()
with open(filename, 'rb') as f:
content = f.read()
self.wfile.write(content)
elif attr_call:
with self.update_lock:
param_dict = parse_qs(urlparse(func).query)
                # parse_qs returns parameters as lists; here we take the first element
for k in param_dict:
param_dict[k] = param_dict[k][0]
widget, func = attr_call.group(1, 2)
try:
content, headers = get_method_by_name(get_method_by_id(widget), func)(**param_dict)
if content is None:
self.send_response(503)
return
self.send_response(200)
except IOError:
self._log.error('attr %s/%s call error' % (widget, func), exc_info=True)
self.send_response(404)
return
except (TypeError, AttributeError):
self._log.error('attr %s/%s not available' % (widget, func))
self.send_response(503)
return
for k in headers:
self.send_header(k, headers[k])
self.end_headers()
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(encode_text(content))
def close(self):
""" Command to initiate an App to close
"""
self._log.debug('shutting down...')
self.server.server_starter_instance.stop()
def on_close(self):
""" Called by the server when the App have to be terminated
"""
self._stop_update_flag = True
for ws in self.websockets:
ws.close()
def onload(self, emitter):
""" WebPage Event that occurs on webpage loaded
"""
self._log.debug('App.onload event occurred')
def onerror(self, emitter, message, source, lineno, colno):
""" WebPage Event that occurs on webpage errors
"""
self._log.debug("""App.onerror event occurred in webpage:
\nMESSAGE:%s\nSOURCE:%s\nLINENO:%s\nCOLNO:%s\n"""%(message, source, lineno, colno))
def ononline(self, emitter):
""" WebPage Event that occurs on webpage goes online after a disconnection
"""
self._log.debug('App.ononline event occurred')
def onpagehide(self, emitter):
""" WebPage Event that occurs on webpage when the user navigates away
"""
self._log.debug('App.onpagehide event occurred')
def onpageshow(self, emitter):
""" WebPage Event that occurs on webpage gets shown
"""
self._log.debug('App.onpageshow event occurred')
def onresize(self, emitter, width, height):
""" WebPage Event that occurs on webpage gets resized
"""
self._log.debug('App.onresize event occurred. Width:%s Height:%s'%(width, height))
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
daemon_threads = False
# noinspection PyPep8Naming
def __init__(self, server_address, RequestHandlerClass,
auth, multiple_instance, enable_file_cache, update_interval,
websocket_timeout_timer_ms, pending_messages_queue_length,
title, server_starter_instance, certfile, keyfile, ssl_version, *userdata):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self.auth = auth
self.multiple_instance = multiple_instance
self.enable_file_cache = enable_file_cache
self.update_interval = update_interval
self.websocket_timeout_timer_ms = websocket_timeout_timer_ms
self.pending_messages_queue_length = pending_messages_queue_length
self.title = title
self.server_starter_instance = server_starter_instance
self.userdata = userdata
self.certfile = certfile
self.keyfile = keyfile
self.ssl_version = ssl_version
if self.ssl_version!=None:
self.socket = ssl.wrap_socket(self.socket, keyfile=self.keyfile, certfile=self.certfile, server_side=True, ssl_version=self.ssl_version, do_handshake_on_connect=True)
class Server(object):
# noinspection PyShadowingNames
def __init__(self, gui_class, title='', start=True, address='127.0.0.1', port=0, username=None, password=None,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=True,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000,
certfile=None, keyfile=None, ssl_version=None, userdata=()):
self._gui = gui_class
self._title = title or gui_class.__name__
self._sserver = None
self._sth = None
self._base_address = ''
self._address = address
self._sport = port
self._multiple_instance = multiple_instance
self._enable_file_cache = enable_file_cache
self._update_interval = update_interval
self._start_browser = start_browser
self._websocket_timeout_timer_ms = websocket_timeout_timer_ms
self._pending_messages_queue_length = pending_messages_queue_length
self._certfile = certfile
self._keyfile = keyfile
self._ssl_version = ssl_version
self._userdata = userdata
if username and password:
self._auth = base64.b64encode(encode_text("%s:%s" % (username, password)))
else:
self._auth = None
if not isinstance(userdata, tuple):
raise ValueError('userdata must be a tuple')
self._log = logging.getLogger('remi.server')
self._alive = True
if start:
self._myid = threading.Thread.ident
self.start()
self.serve_forever()
@property
def title(self):
return self._title
@property
def address(self):
return self._base_address
def start(self):
# Create a web server and define the handler to manage the incoming
# request
self._sserver = ThreadedHTTPServer((self._address, self._sport), self._gui, self._auth,
self._multiple_instance, self._enable_file_cache,
self._update_interval, self._websocket_timeout_timer_ms,
self._pending_messages_queue_length, self._title,
self, self._certfile, self._keyfile, self._ssl_version, *self._userdata)
shost, sport = self._sserver.socket.getsockname()[:2]
self._log.info('Started httpserver http://%s:%s/'%(shost,sport))
        # when listening on multiple net interfaces, the browser connects to localhost
if shost == '0.0.0.0':
shost = '127.0.0.1'
self._base_address = 'http://%s:%s/' % (shost,sport)
if self._start_browser:
try:
import android
android.webbrowser.open(self._base_address)
except ImportError:
# use default browser instead of always forcing IE on Windows
if os.name == 'nt':
webbrowser.get('windows-default').open(self._base_address)
else:
webbrowser.open(self._base_address)
self._sth = threading.Thread(target=self._sserver.serve_forever)
self._sth.daemon = False
self._sth.start()
def serve_forever(self):
        # We could join on the threads, but join blocks all interrupts
        # (including ctrl+c), so just spin here.
# noinspection PyBroadException
try:
def sig_manager(sig, callstack):
self.stop()
self._log.info('*** signal %d received.' % sig)
return signal.SIG_IGN
prev_handler = signal.signal(signal.SIGINT, sig_manager)
except Exception:
            # signal.pause() is missing on Windows; fall through and poll in the loop below
pass
except KeyboardInterrupt:
pass
while self._alive:
try:
time.sleep(1)
except:
self._alive = False
self._log.debug(' ** serve_forever() quitting')
def stop(self):
global clients
self._alive = False
self._sserver.shutdown()
for client in clients.values():
client.on_close()
class StandaloneServer(Server):
def __init__(self, gui_class, title='', width=800, height=600, resizable=True, fullscreen=False, start=True,
userdata=()):
Server.__init__(self, gui_class, title=title, start=False, address='127.0.0.1', port=0, username=None,
password=None,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=False,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000, userdata=userdata)
self._application_conf = {'width': width, 'height': height, 'resizable': resizable, 'fullscreen': fullscreen}
if start:
self.serve_forever()
def serve_forever(self):
try:
import webview
except ImportError:
raise ImportError('PyWebView is missing. Please install it by:\n '
'pip install pywebview\n '
'more info at https://github.com/r0x0r/pywebview')
else:
Server.start(self)
webview.create_window(self.title, self.address, **self._application_conf)
webview.start()
Server.stop(self)
def start(main_gui_class, **kwargs):
"""This method starts the webserver with a specific App subclass."""
debug = kwargs.pop('debug', False)
standalone = kwargs.pop('standalone', False)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
format='%(name)-16s %(levelname)-8s %(message)s')
logging.getLogger('remi').setLevel(
level=logging.DEBUG if debug else logging.INFO)
if standalone:
s = StandaloneServer(main_gui_class, start=True, **kwargs)
else:
s = Server(main_gui_class, start=True, **kwargs)
|
parallel.py
|
from __future__ import absolute_import
import multiprocessing as mp
from builtins import range
from . import loadable
# Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved.
# See the LICENSE file for licensing terms (BSD-style).
def _parallel_job(factory, args, queue, index):
"""Helper function to start up an input queue in a subprocess."""
data = factory(*args)
for sample in data:
sample["__thread__"] = index
queue.put(sample)
def parallelize_input(factory, args=(), nthreads=4, maxsize=256):
"""Parallelize an input queue.
:param factory: factory function for input queue
:param args: arguments to factory function
:param nthreads: number of subprocesses
:param maxsize: maximum queue size for input queue
"""
queue = mp.Queue(maxsize)
jobs = [mp.Process(target=_parallel_job, args=(factory, args, queue, i))
for i in range(nthreads)]
for job in jobs:
job.start()
while True:
sample = queue.get()
sample["__queue_size__"] = queue.qsize()
yield sample
def _factory(fname, method):
"""Helper function for parallel_load."""
inputs = loadable.load_input(fname)
f = getattr(inputs, method)
return f()
def parallel_load(fname, method="training_data", nthreads=4, maxsize=256):
"""Load the input pipeline and execute it in parallel.
    :param fname: filename containing the input pipeline (.py)
:param method: method on ``Inputs`` object (default: training_data)
:param nthreads: number of subprocesses
:param maxsize: maximum queue size for input queue
"""
return parallelize_input(_factory, (fname, method), nthreads=nthreads, maxsize=maxsize)
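# Illustrative usage sketch (not part of the original module); assumes an
# "inputs.py" file that loadable.load_input() can turn into an Inputs object
# with a training_data() generator method:
#
#   for sample in parallel_load("inputs.py", nthreads=4):
#       print(sample["__thread__"], sample["__queue_size__"])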
|
docker_agent.py
|
import json
import time
import os
import threading
import requests
import docker
from . import BaseAgent
from .. import utility
from .. import characters
class DockerAgent(BaseAgent):
"""The Docker Agent that Connects to a Docker container where the character runs."""
def __init__(self,
docker_image,
port,
server='http://localhost',
character=characters.Bomber,
docker_client=None,
env_vars=None):
super(DockerAgent, self).__init__(character)
self._docker_image = docker_image
self._docker_client = docker_client
if not self._docker_client:
self._docker_client = docker.from_env()
self._docker_client.login(os.getenv("PLAYGROUND_DOCKER_LOGIN"),
os.getenv("PLAYGROUND_DOCKER_PASSWORD"))
self._acknowledged = False # Becomes True when the container is ready.
self._server = server
self._port = port
self._timeout = 32
self._container = None
self._env_vars = env_vars or {}
# Pass env variables starting with DOCKER_AGENT to the container.
for key, value in os.environ.items():
if not key.startswith("DOCKER_AGENT_"):
continue
env_key = key.replace("DOCKER_AGENT_", "")
self._env_vars[env_key] = value
# Start the docker agent if it is on this computer. Otherwise, it's far
# away and we need to tell that server to start it.
if 'localhost' in server:
container_thread = threading.Thread(
target=self._run_container, daemon=True)
container_thread.start()
print("Waiting for docker agent at {}:{}...".format(server, port))
self._wait_for_docker()
else:
request_url = "{}:8000/run_container".format(server)
request_json = {'docker_image': self._docker_image,
'env_vars': self._env_vars,
'port': port}
requests.post(request_url, json=request_json)
waiting_thread = threading.Thread(
target=self._wait_for_docker, daemon=True)
waiting_thread.start()
def _run_container(self):
print("Starting container...")
self._container = self._docker_client.containers.run(
self._docker_image,
detach=True,
auto_remove=True,
ports={10080: self._port},
environment=self._env_vars)
for line in self._container.logs(stream=True):
print(line.decode("utf-8").strip())
def _wait_for_docker(self):
"""Wait for network service to appear. A timeout of 0 waits forever."""
timeout = self._timeout
backoff = .25
max_backoff = min(timeout, 16)
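        # Exponential backoff: the sleep between connection attempts doubles
        # after every failure, capped at max_backoff seconds.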
if timeout:
            # compute an absolute deadline shared by the retry loop below
end = time.time() + timeout
while True:
try:
now = time.time()
                if timeout and end < now:
                    print("Timed out - %s:%s" % (self._server, self._port))
                    # A bare `raise` has no active exception here; raise explicitly.
                    raise TimeoutError("Timed out waiting for docker agent at "
                                       "%s:%s" % (self._server, self._port))
request_url = '%s:%s/ping' % (self._server, self._port)
req = requests.get(request_url)
self._acknowledged = True
return True
except requests.exceptions.ConnectionError as e:
print("ConnectionError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except requests.exceptions.HTTPError as e:
print("HTTPError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except docker.errors.APIError as e:
print("This is a Docker error. Please fix: ", e)
raise
def act(self, obs, action_space):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
request_url = "http://localhost:{}/action".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.15,
json={
"obs":
obs_serialized,
"action_space":
json.dumps(action_space, cls=utility.PommermanJSONEncoder)
})
action = req.json()['action']
except requests.exceptions.Timeout as e:
print('Timeout!')
# TODO: Fix this. It's ugly.
action = [0] * len(action_space.shape)
if len(action) == 1:
action = action[0]
return action
    def shutdown(self):
        print("Stopping container...")
        if self._container:
            try:
                return self._container.remove(force=True)
            except docker.errors.NotFound:
                # The container is already gone (e.g. via auto_remove), so
                # treat this as a successful shutdown.
                return True
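# Usage sketch (illustrative, not part of the original file). The image name
# below is an assumption; any image exposing the expected HTTP agent API on
# port 10080 inside the container would work.
#
#   agent = DockerAgent("multiagentlearning/simpleagent", port=12333)
#   action = agent.act(obs, action_space)  # obs/action_space come from the env
#   agent.shutdown()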
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import unittest
import numpy as np
from tensorflow.compiler.xla.python import xla_client
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_allclose, c, arguments,
expected)
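# A minimal sketch (not part of the original tests) of the flow the helpers
# above wrap: build a computation, compile it, and execute it with Python
# values. It assumes the same legacy xla_client API used throughout this file.
#
#   builder = xla_client.ComputationBuilder("example")
#   builder.Add(builder.ConstantF32Scalar(1.0), builder.ConstantF32Scalar(2.0))
#   result = builder.Build().Compile().ExecuteWithPythonValues()  # -> 3.0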
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
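    # Logical shift treats the int32 bit pattern as unsigned, so
    # 0xFFFFFFFF >> 1 == 0x7FFFFFFF == 2**31 - 1.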
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
    self.assertEqual(len(proto.computations), 1)
    self.assertEqual(len(proto.computations[0].instructions), 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As minimal as possible number of additional ops are added
around the op being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
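    # ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)): contract
    # lhs dim 2 with rhs dim 1 and batch over dim 0, i.e. a batched matmul.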
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
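    # Padding spec per dimension is (edge_padding_low, edge_padding_high,
    # interior_padding), matching the PaddingConfig proto used below.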
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
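      # Execute blocks until the infeed is fed and the outfeed is drained, so
      # it runs on a separate thread while this thread transfers the data.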
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(LocalComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
safaribooks.py
|
#!/usr/bin/env python3
# coding: utf-8
import re
import os
import sys
import json
import shutil
import pathlib
import getpass
import logging
import argparse
import requests
import traceback
from html import escape
from random import random
from lxml import html, etree
from multiprocessing import Process, Queue, Value
from urllib.parse import urljoin, urlparse, parse_qs, quote_plus
PATH = os.path.dirname(os.path.realpath(__file__))
COOKIES_FILE = os.path.join(PATH, "cookies.json")
ORLY_BASE_HOST = "oreilly.com" # PLEASE INSERT URL HERE
SAFARI_BASE_HOST = "learning." + ORLY_BASE_HOST
API_ORIGIN_HOST = "api." + ORLY_BASE_HOST
ORLY_BASE_URL = "https://www." + ORLY_BASE_HOST
SAFARI_BASE_URL = "https://" + SAFARI_BASE_HOST
API_ORIGIN_URL = "https://" + API_ORIGIN_HOST
PROFILE_URL = SAFARI_BASE_URL + "/profile/"
# DEBUG
USE_PROXY = False
PROXIES = {"https": "https://127.0.0.1:8080"}
class Display:
BASE_FORMAT = logging.Formatter(
fmt="[%(asctime)s] %(message)s",
datefmt="%d/%b/%Y %H:%M:%S"
)
SH_DEFAULT = "\033[0m" if "win" not in sys.platform else "" # TODO: colors for Windows
SH_YELLOW = "\033[33m" if "win" not in sys.platform else ""
SH_BG_RED = "\033[41m" if "win" not in sys.platform else ""
SH_BG_YELLOW = "\033[43m" if "win" not in sys.platform else ""
def __init__(self, log_file):
self.output_dir = ""
self.output_dir_set = False
self.log_file = os.path.join(PATH, log_file)
self.logger = logging.getLogger("SafariBooks")
self.logger.setLevel(logging.INFO)
logs_handler = logging.FileHandler(filename=self.log_file)
logs_handler.setFormatter(self.BASE_FORMAT)
logs_handler.setLevel(logging.INFO)
self.logger.addHandler(logs_handler)
self.columns, _ = shutil.get_terminal_size()
self.logger.info("** Welcome to SafariBooks! **")
self.book_ad_info = False
self.css_ad_info = Value("i", 0)
self.images_ad_info = Value("i", 0)
self.last_request = (None,)
self.in_error = False
self.state_status = Value("i", 0)
sys.excepthook = self.unhandled_exception
def set_output_dir(self, output_dir):
self.info("Output directory:\n %s" % output_dir)
self.output_dir = output_dir
self.output_dir_set = True
def unregister(self):
self.logger.handlers[0].close()
sys.excepthook = sys.__excepthook__
def log(self, message):
try:
self.logger.info(str(message, "utf-8", "replace"))
except (UnicodeDecodeError, Exception):
self.logger.info(message)
def out(self, put):
pattern = "\r{!s}\r{!s}\n"
try:
s = pattern.format(" " * self.columns, str(put, "utf-8", "replace"))
except TypeError:
s = pattern.format(" " * self.columns, put)
sys.stdout.write(s)
def info(self, message, state=False):
self.log(message)
output = (self.SH_YELLOW + "[*]" + self.SH_DEFAULT if not state else
self.SH_BG_YELLOW + "[-]" + self.SH_DEFAULT) + " %s" % message
self.out(output)
def error(self, error):
if not self.in_error:
self.in_error = True
self.log(error)
output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
self.out(output)
def exit(self, error):
self.error(str(error))
if self.output_dir_set:
output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
" Please delete the output directory '" + self.output_dir + "'"
" and restart the program.")
self.out(output)
output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
self.out(output)
self.save_last_request()
sys.exit(1)
def unhandled_exception(self, _, o, tb):
self.log("".join(traceback.format_tb(tb)))
self.exit("Unhandled Exception: %s (type: %s)" % (o, o.__class__.__name__))
def save_last_request(self):
if any(self.last_request):
self.log("Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
.format(*self.last_request))
def intro(self):
output = self.SH_YELLOW + ("""
____ ___ _
/ __/__ _/ _/__ _____(_)
_\ \/ _ `/ _/ _ `/ __/ /
/___/\_,_/_/ \_,_/_/ /_/
/ _ )___ ___ / /__ ___
/ _ / _ \/ _ \/ '_/(_-<
/____/\___/\___/_/\_\/___/
""" if random() > 0.5 else """
██████╗ ██████╗ ██╗ ██╗ ██╗██████╗
██╔═══██╗ ██╔══██╗██║ ╚██╗ ██╔╝╚════██╗
██║ ██║ ██████╔╝██║ ╚████╔╝ ▄███╔╝
██║ ██║ ██╔══██╗██║ ╚██╔╝ ▀▀══╝
╚██████╔╝ ██║ ██║███████╗██║ ██╗
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
""") + self.SH_DEFAULT
output += "\n" + "~" * (self.columns // 2)
self.out(output)
def parse_description(self, desc):
if not desc:
return "n/d"
try:
return html.fromstring(desc).text_content()
except (html.etree.ParseError, html.etree.ParserError) as e:
self.log("Error parsing the description: %s" % e)
return "n/d"
def book_info(self, info):
description = self.parse_description(info.get("description", None)).replace("\n", " ")
for t in [
("Title", info.get("title", "")), ("Authors", ", ".join(aut.get("name", "") for aut in info.get("authors", []))),
("Identifier", info.get("identifier", "")), ("ISBN", info.get("isbn", "")),
("Publishers", ", ".join(pub.get("name", "") for pub in info.get("publishers", []))),
("Rights", info.get("rights", "")),
("Description", description[:500] + "..." if len(description) >= 500 else description),
("Release Date", info.get("issued", "")),
("URL", info.get("web_url", ""))
]:
self.info("{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT, t[1]), True)
def state(self, origin, done):
progress = int(done * 100 / origin)
bar = int(progress * (self.columns - 11) / 100)
if self.state_status.value < progress:
self.state_status.value = progress
sys.stdout.write(
"\r " + self.SH_BG_YELLOW + "[" + ("#" * bar).ljust(self.columns - 11, "-") + "]" +
self.SH_DEFAULT + ("%4s" % progress) + "%" + ("\n" if progress == 100 else "")
)
def done(self, epub_file):
self.info("Done: %s\n\n" % epub_file +
" If you like it, please * this project on GitHub to make it known:\n"
" https://github.com/lorenzodifuccia/safaribooks\n"
" e don't forget to renew your Safari Books Online subscription:\n"
" " + SAFARI_BASE_URL + "\n\n" +
self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Bye!!")
@staticmethod
def api_error(response):
message = "API: "
if "detail" in response and "Not found" in response["detail"]:
message += "book's not present in Safari Books Online.\n" \
" The book identifier is the digits that you can find in the URL:\n" \
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
else:
os.remove(COOKIES_FILE)
message += "Out-of-Session%s.\n" % (" (%s)" % response["detail"]) if "detail" in response else "" + \
Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
" Use the `--cred` or `--login` options in order to perform the auth login to Safari."
return message
class WinQueue(list): # TODO: error while using `Process` on Windows: can't pickle _thread.RLock objects
def put(self, el):
self.append(el)
def qsize(self):
return self.__len__()
class SafariBooks:
LOGIN_URL = ORLY_BASE_URL + "/member/auth/login/"
LOGIN_ENTRY_URL = SAFARI_BASE_URL + "/login/unified/?next=/home/"
API_TEMPLATE = SAFARI_BASE_URL + "/api/v1/book/{0}/"
BASE_01_HTML = "<!DOCTYPE html>\n" \
"<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
" xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
" http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
" xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
"<head>\n" \
"{0}\n" \
"<style type=\"text/css\">" \
"body{{margin:1em;background-color:transparent!important;}}" \
"#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"
KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
"word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
"{{overflow-x:unset!important;overflow:unset!important;" \
"overflow-y:unset!important;white-space:pre-wrap!important;}}"
BASE_02_HTML = "</style>" \
"</head>\n" \
"<body>{1}</body>\n</html>"
CONTAINER_XML = "<?xml version=\"1.0\"?>" \
"<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
"<rootfiles>" \
"<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
"</rootfiles>" \
"</container>"
# Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
"<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
"<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
" xmlns:opf=\"http://www.idpf.org/2007/opf\">\n" \
"<dc:title>{1}</dc:title>\n" \
"{2}\n" \
"<dc:description>{3}</dc:description>\n" \
"{4}" \
"<dc:publisher>{5}</dc:publisher>\n" \
"<dc:rights>{6}</dc:rights>\n" \
"<dc:language>en-US</dc:language>\n" \
"<dc:date>{7}</dc:date>\n" \
"<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
"<meta name=\"cover\" content=\"{8}\"/>\n" \
"</metadata>\n" \
"<manifest>\n" \
"<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
"{9}\n" \
"</manifest>\n" \
"<spine toc=\"ncx\">\n{10}</spine>\n" \
"<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
"</package>"
# Format: ID, Depth, Title, Author, NAVMAP
TOC_NCX = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\n" \
"<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
" \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">\n" \
"<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\n" \
"<head>\n" \
"<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>\n" \
"<meta content=\"{1}\" name=\"dtb:depth\"/>\n" \
"<meta content=\"0\" name=\"dtb:totalPageCount\"/>\n" \
"<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\n" \
"</head>\n" \
"<docTitle><text>{2}</text></docTitle>\n" \
"<docAuthor><text>{3}</text></docAuthor>\n" \
"<navMap>{4}</navMap>\n" \
"</ncx>"
HEADERS = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Referer": LOGIN_ENTRY_URL,
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/90.0.4430.212 Safari/537.36"
}
COOKIE_FLOAT_MAX_AGE_PATTERN = re.compile(r'(max-age=\d*\.\d*)', re.IGNORECASE)
def __init__(self, args):
self.args = args
self.display = Display("info_%s.log" % escape(args.bookid))
self.display.intro()
self.session = requests.Session()
if USE_PROXY: # DEBUG
self.session.proxies = PROXIES
self.session.verify = False
self.session.headers.update(self.HEADERS)
self.jwt = {}
if not args.cred:
if not os.path.isfile(COOKIES_FILE):
self.display.exit("Login: unable to find `cookies.json` file.\n"
" Please use the `--cred` or `--login` options to perform the login.")
self.session.cookies.update(json.load(open(COOKIES_FILE)))
else:
self.display.info("Logging into Safari Books Online...", state=True)
self.do_login(*args.cred)
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, 'w'))
self.check_login()
self.book_id = args.bookid
self.api_url = self.API_TEMPLATE.format(self.book_id)
self.display.info("Retrieving book info...")
self.book_info = self.get_book_info()
self.display.book_info(self.book_info)
self.display.info("Retrieving book chapters...")
self.book_chapters = self.get_book_chapters()
self.chapters_queue = self.book_chapters[:]
if len(self.book_chapters) > sys.getrecursionlimit():
sys.setrecursionlimit(len(self.book_chapters))
self.book_title = self.book_info["title"]
self.base_url = self.book_info["web_url"]
self.clean_book_title = "".join(self.escape_dirname(self.book_title).split(",")[:2]) \
+ " ({0})".format(self.book_id)
books_dir = os.path.join(PATH, "Books")
if not os.path.isdir(books_dir):
os.mkdir(books_dir)
self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
self.display.set_output_dir(self.BOOK_PATH)
self.css_path = ""
self.images_path = ""
self.create_dirs()
self.chapter_title = ""
self.filename = ""
self.chapter_stylesheets = []
self.css = []
self.images = []
self.display.info("Downloading book contents... (%s chapters)" % len(self.book_chapters), state=True)
self.BASE_HTML = self.BASE_01_HTML + (self.KINDLE_HTML if not args.kindle else "") + self.BASE_02_HTML
self.cover = False
self.get()
if not self.cover:
self.cover = self.get_default_cover() if "cover" in self.book_info else False
cover_html = self.parse_html(
html.fromstring("<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>".format(self.cover)), True
)
self.book_chapters = [{
"filename": "default_cover.xhtml",
"title": "Cover"
}] + self.book_chapters
self.filename = self.book_chapters[0]["filename"]
self.save_page_html(cover_html)
self.css_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book CSSs... (%s files)" % len(self.css), state=True)
self.collect_css()
self.images_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book images... (%s files)" % len(self.images), state=True)
self.collect_images()
self.display.info("Creating EPUB file...", state=True)
self.create_epub()
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, "w"))
self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
self.display.unregister()
if not self.display.in_error and not args.log:
os.remove(self.display.log_file)
def handle_cookie_update(self, set_cookie_headers):
for morsel in set_cookie_headers:
# Handle Float 'max-age' Cookie
if self.COOKIE_FLOAT_MAX_AGE_PATTERN.search(morsel):
cookie_key, cookie_value = morsel.split(";")[0].split("=")
self.session.cookies.set(cookie_key, cookie_value)
def requests_provider(self, url, is_post=False, data=None, perform_redirect=True, **kwargs):
try:
response = getattr(self.session, "post" if is_post else "get")(
url,
data=data,
allow_redirects=False,
**kwargs
)
self.handle_cookie_update(response.raw.headers.getlist("Set-Cookie"))
self.display.last_request = (
url, data, kwargs, response.status_code, "\n".join(
["\t{}: {}".format(*h) for h in response.headers.items()]
), response.text
)
except (requests.ConnectionError, requests.ConnectTimeout, requests.RequestException) as request_exception:
self.display.error(str(request_exception))
return 0
if response.is_redirect and perform_redirect:
return self.requests_provider(response.next.url, is_post, None, perform_redirect)
# TODO How about **kwargs?
return response
@staticmethod
def parse_cred(cred):
if ":" not in cred:
return False
sep = cred.index(":")
new_cred = ["", ""]
new_cred[0] = cred[:sep].strip("'").strip('"')
if "@" not in new_cred[0]:
return False
new_cred[1] = cred[sep + 1:]
return new_cred
def do_login(self, email, password):
response = self.requests_provider(self.LOGIN_ENTRY_URL)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
next_parameter = None
try:
next_parameter = parse_qs(urlparse(response.request.url).query)["next"][0]
except (AttributeError, ValueError, IndexError):
self.display.exit("Login: unable to complete login on Safari Books Online. Try again...")
redirect_uri = API_ORIGIN_URL + quote_plus(next_parameter)
response = self.requests_provider(
self.LOGIN_URL,
is_post=True,
json={
"email": email,
"password": password,
"redirect_uri": redirect_uri
},
perform_redirect=False
)
if response == 0:
self.display.exit("Login: unable to perform auth to Safari Books Online.\n Try again...")
if response.status_code != 200: # TODO To be reviewed
try:
error_page = html.fromstring(response.text)
errors_message = error_page.xpath("//ul[@class='errorlist']//li/text()")
recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
messages = ([" `%s`" % error for error in errors_message
if "password" in error or "email" in error] if len(errors_message) else []) + \
([" `ReCaptcha required (wait or do logout from the website).`"] if len(
recaptcha) else [])
self.display.exit(
"Login: unable to perform auth login to Safari Books Online.\n" + self.display.SH_YELLOW +
"[*]" + self.display.SH_DEFAULT + " Details:\n" + "%s" % "\n".join(
messages if len(messages) else [" Unexpected error!"])
)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Login: your login went wrong and it encountered in an error"
" trying to parse the login details of Safari Books Online. Try again..."
)
self.jwt = response.json() # TODO: save JWT Tokens and use the refresh_token to restore user session
response = self.requests_provider(self.jwt["redirect_uri"])
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
def check_login(self):
response = self.requests_provider(PROFILE_URL, perform_redirect=False)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
elif response.status_code != 200:
self.display.exit("Authentication issue: unable to access profile page.")
elif "user_type\":\"Expired\"" in response.text:
self.display.exit("Authentication issue: account subscription expired.")
self.display.info("Successfully authenticated.", state=True)
def get_book_info(self):
response = self.requests_provider(self.api_url)
if response == 0:
self.display.exit("API: unable to retrieve book info.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "last_chapter_read" in response:
del response["last_chapter_read"]
for key, value in response.items():
if value is None:
response[key] = 'n/a'
return response
def get_book_chapters(self, page=1):
response = self.requests_provider(urljoin(self.api_url, "chapter/?page=%s" % page))
if response == 0:
self.display.exit("API: unable to retrieve book chapters.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "results" not in response or not len(response["results"]):
self.display.exit("API: unable to retrieve book chapters.")
if response["count"] > sys.getrecursionlimit():
sys.setrecursionlimit(response["count"])
result = []
result.extend([c for c in response["results"] if "cover" in c["filename"] or "cover" in c["title"]])
for c in result:
del response["results"][response["results"].index(c)]
result += response["results"]
return result + (self.get_book_chapters(page + 1) if response["next"] else [])
def get_default_cover(self):
response = self.requests_provider(self.book_info["cover"], stream=True)
if response == 0:
self.display.error("Error trying to retrieve the cover: %s" % self.book_info["cover"])
return False
file_ext = response.headers["Content-Type"].split("/")[-1]
with open(os.path.join(self.images_path, "default_cover." + file_ext), 'wb') as i:
for chunk in response.iter_content(1024):
i.write(chunk)
return "default_cover." + file_ext
def get_html(self, url):
response = self.requests_provider(url)
if response == 0 or response.status_code != 200:
self.display.exit(
"Crawler: error trying to retrieve this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
root = None
try:
root = html.fromstring(response.text, base_url=SAFARI_BASE_URL)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Crawler: error trying to parse this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
return root
@staticmethod
def url_is_absolute(url):
return bool(urlparse(url).netloc)
@staticmethod
def is_image_link(url: str):
return pathlib.Path(url).suffix[1:].lower() in ["jpg", "jpeg", "png", "gif"]
def link_replace(self, link):
if link and not link.startswith("mailto"):
if not self.url_is_absolute(link):
if any(x in link for x in ["cover", "images", "graphics"]) or \
self.is_image_link(link):
image = link.split("/")[-1]
return "Images/" + image
return link.replace(".html", ".xhtml")
else:
if self.book_id in link:
return self.link_replace(link.split(self.book_id)[-1])
return link
@staticmethod
def get_cover(html_root):
lowercase_ns = etree.FunctionNamespace(None)
lowercase_ns["lower-case"] = lambda _, n: n[0].lower() if n and len(n) else ""
images = html_root.xpath("//img[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover') or"
"contains(lower-case(@alt), 'cover')]")
if len(images):
return images[0]
divs = html_root.xpath("//div[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(divs):
return divs[0]
a = html_root.xpath("//a[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(a):
return a[0]
return None
def parse_html(self, root, first_page=False):
if random() > 0.8:
if len(root.xpath("//div[@class='controls']/a/text()")):
self.display.exit(self.display.api_error(" "))
book_content = root.xpath("//div[@id='sbo-rt-content']")
if not len(book_content):
self.display.exit(
"Parser: book content's corrupted or not present: %s (%s)" %
(self.filename, self.chapter_title)
)
page_css = ""
if len(self.chapter_stylesheets):
for chapter_css_url in self.chapter_stylesheets:
if chapter_css_url not in self.css:
self.css.append(chapter_css_url)
self.display.log("Crawler: found a new CSS at %s" % chapter_css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(chapter_css_url))
stylesheet_links = root.xpath("//link[@rel='stylesheet']")
if len(stylesheet_links):
for s in stylesheet_links:
css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
else urljoin(self.base_url, s.attrib["href"])
if css_url not in self.css:
self.css.append(css_url)
self.display.log("Crawler: found a new CSS at %s" % css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(css_url))
stylesheets = root.xpath("//style")
if len(stylesheets):
for css in stylesheets:
if "data-template" in css.attrib and len(css.attrib["data-template"]):
css.text = css.attrib["data-template"]
del css.attrib["data-template"]
try:
page_css += html.tostring(css, method="xml", encoding='unicode') + "\n"
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse one CSS found in this page: %s (%s)" %
(self.filename, self.chapter_title)
)
# TODO: add all not covered tag for `link_replace` function
svg_image_tags = root.xpath("//image")
if len(svg_image_tags):
for img in svg_image_tags:
image_attr_href = [x for x in img.attrib.keys() if "href" in x]
if len(image_attr_href):
svg_url = img.attrib.get(image_attr_href[0])
svg_root = img.getparent().getparent()
new_img = svg_root.makeelement("img")
new_img.attrib.update({"src": svg_url})
svg_root.remove(img.getparent())
svg_root.append(new_img)
book_content = book_content[0]
book_content.rewrite_links(self.link_replace)
xhtml = None
try:
if first_page:
is_cover = self.get_cover(book_content)
if is_cover is not None:
page_css = "<style>" \
"body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
"#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
"img{height:90vh;margin-left:auto;margin-right:auto;}" \
"</style>"
cover_html = html.fromstring("<div id=\"Cover\"></div>")
cover_div = cover_html.xpath("//div")[0]
cover_img = cover_div.makeelement("img")
cover_img.attrib.update({"src": is_cover.attrib["src"]})
cover_div.append(cover_img)
book_content = cover_html
self.cover = is_cover.attrib["src"]
xhtml = html.tostring(book_content, method="xml", encoding='unicode')
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse HTML of this page: %s (%s)" %
(self.filename, self.chapter_title)
)
return page_css, xhtml
@staticmethod
def escape_dirname(dirname, clean_space=False):
if ":" in dirname:
if dirname.index(":") > 15:
dirname = dirname.split(":")[0]
elif "win" in sys.platform:
dirname = dirname.replace(":", ",")
for ch in ['~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/', '`', '\'', '"', '|', '+', ':']:
if ch in dirname:
dirname = dirname.replace(ch, "_")
return dirname if not clean_space else dirname.replace(" ", "")
def create_dirs(self):
if os.path.isdir(self.BOOK_PATH):
self.display.log("Book directory already exists: %s" % self.BOOK_PATH)
else:
os.makedirs(self.BOOK_PATH)
oebps = os.path.join(self.BOOK_PATH, "OEBPS")
if not os.path.isdir(oebps):
self.display.book_ad_info = True
os.makedirs(oebps)
self.css_path = os.path.join(oebps, "Styles")
if os.path.isdir(self.css_path):
self.display.log("CSSs directory already exists: %s" % self.css_path)
else:
os.makedirs(self.css_path)
self.display.css_ad_info.value = 1
self.images_path = os.path.join(oebps, "Images")
if os.path.isdir(self.images_path):
self.display.log("Images directory already exists: %s" % self.images_path)
else:
os.makedirs(self.images_path)
self.display.images_ad_info.value = 1
def save_page_html(self, contents):
self.filename = self.filename.replace(".html", ".xhtml")
open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb") \
.write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
self.display.log("Created: %s" % self.filename)
def get(self):
len_books = len(self.book_chapters)
for _ in range(len_books):
if not len(self.chapters_queue):
return
first_page = len_books == len(self.chapters_queue)
next_chapter = self.chapters_queue.pop(0)
self.chapter_title = next_chapter["title"]
self.filename = next_chapter["filename"]
asset_base_url = next_chapter['asset_base_url']
api_v2_detected = False
if 'v2' in next_chapter['content']:
asset_base_url = SAFARI_BASE_URL + "/api/v2/epubs/urn:orm:book:{}/files".format(self.book_id)
api_v2_detected = True
if "images" in next_chapter and len(next_chapter["images"]):
for img_url in next_chapter['images']:
if api_v2_detected:
self.images.append(asset_base_url + '/' + img_url)
else:
self.images.append(urljoin(next_chapter['asset_base_url'], img_url))
# Stylesheets
self.chapter_stylesheets = []
if "stylesheets" in next_chapter and len(next_chapter["stylesheets"]):
self.chapter_stylesheets.extend(x["url"] for x in next_chapter["stylesheets"])
if "site_styles" in next_chapter and len(next_chapter["site_styles"]):
self.chapter_stylesheets.extend(next_chapter["site_styles"])
if os.path.isfile(os.path.join(self.BOOK_PATH, "OEBPS", self.filename.replace(".html", ".xhtml"))):
if not self.display.book_ad_info and \
next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
self.display.info(
("File `%s` already exists.\n"
" If you want to download again all the book,\n"
" please delete the output directory '" + self.BOOK_PATH + "' and restart the program.")
% self.filename.replace(".html", ".xhtml")
)
self.display.book_ad_info = 2
else:
self.save_page_html(self.parse_html(self.get_html(next_chapter["content"]), first_page))
self.display.state(len_books, len_books - len(self.chapters_queue))
def _thread_download_css(self, url):
css_file = os.path.join(self.css_path, "Style{0:0>2}.css".format(self.css.index(url)))
if os.path.isfile(css_file):
if not self.display.css_ad_info.value and url not in self.css[:self.css.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the CSSs,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
css_file)
self.display.css_ad_info.value = 1
else:
response = self.requests_provider(url)
if response == 0:
    self.display.error("Error trying to retrieve this CSS: %s\n From: %s" % (css_file, url))
    return  # skip writing the file when the request failed, as the image downloader does
with open(css_file, 'wb') as s:
s.write(response.content)
self.css_done_queue.put(1)
self.display.state(len(self.css), self.css_done_queue.qsize())
def _thread_download_images(self, url):
image_name = url.split("/")[-1]
image_path = os.path.join(self.images_path, image_name)
if os.path.isfile(image_path):
if not self.display.images_ad_info.value and url not in self.images[:self.images.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the images,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
image_name)
self.display.images_ad_info.value = 1
else:
response = self.requests_provider(urljoin(SAFARI_BASE_URL, url), stream=True)
if response == 0:
self.display.error("Error trying to retrieve this image: %s\n From: %s" % (image_name, url))
return
with open(image_path, 'wb') as img:
for chunk in response.iter_content(1024):
img.write(chunk)
self.images_done_queue.put(1)
self.display.state(len(self.images), self.images_done_queue.qsize())
def _start_multiprocessing(self, operation, full_queue):
if len(full_queue) > 5:
for i in range(0, len(full_queue), 5):
self._start_multiprocessing(operation, full_queue[i:i + 5])
else:
process_queue = [Process(target=operation, args=(arg,)) for arg in full_queue]
for proc in process_queue:
proc.start()
for proc in process_queue:
proc.join()
def collect_css(self):
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for css_url in self.css:
self._thread_download_css(css_url)
def collect_images(self):
if self.display.book_ad_info == 2:
self.display.info("Some of the book contents were already downloaded.\n"
" If you want to be sure that all the images will be downloaded,\n"
" please delete the output directory '" + self.BOOK_PATH +
"' and restart the program.")
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for image_url in self.images:
self._thread_download_images(image_url)
def create_content_opf(self):
self.css = next(os.walk(self.css_path))[2]
self.images = next(os.walk(self.images_path))[2]
manifest = []
spine = []
for c in self.book_chapters:
c["filename"] = c["filename"].replace(".html", ".xhtml")
item_id = escape("".join(c["filename"].split(".")[:-1]))
manifest.append("<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />".format(
item_id, c["filename"]
))
spine.append("<itemref idref=\"{0}\"/>".format(item_id))
for i in set(self.images):
dot_split = i.split(".")
head = "img_" + escape("".join(dot_split[:-1]))
extension = dot_split[-1]
manifest.append("<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />".format(
head, i, "jpeg" if "jp" in extension else extension
))
for i in range(len(self.css)):
manifest.append("<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
"media-type=\"text/css\" />".format(i))
authors = "\n".join("<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>".format(
escape(aut.get("name", "n/d"))
) for aut in self.book_info.get("authors", []))
subjects = "\n".join("<dc:subject>{0}</dc:subject>".format(escape(sub.get("name", "n/d")))
for sub in self.book_info.get("subjects", []))
return self.CONTENT_OPF.format(
(self.book_info.get("isbn", self.book_id)),
escape(self.book_title),
authors,
escape(self.book_info.get("description", "")),
subjects,
", ".join(escape(pub.get("name", "")) for pub in self.book_info.get("publishers", [])),
escape(self.book_info.get("rights", "")),
self.book_info.get("issued", ""),
self.cover,
"\n".join(manifest),
"\n".join(spine),
self.book_chapters[0]["filename"].replace(".html", ".xhtml")
)
@staticmethod
def parse_toc(l, c=0, mx=0):
r = ""
for cc in l:
c += 1
if int(cc["depth"]) > mx:
mx = int(cc["depth"])
r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
"<navLabel><text>{2}</text></navLabel>" \
"<content src=\"{3}\"/>".format(
cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
)
if cc["children"]:
sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
r += sr
r += "</navPoint>\n"
return r, c, mx
def create_toc(self):
response = self.requests_provider(urljoin(self.api_url, "toc/"))
if response == 0:
self.display.exit("API: unable to retrieve book chapters. "
"Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!")
response = response.json()
if not isinstance(response, list) and len(response.keys()) == 1:
self.display.exit(
self.display.api_error(response) +
" Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!"
)
navmap, _, max_depth = self.parse_toc(response)
return self.TOC_NCX.format(
(self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
max_depth,
self.book_title,
", ".join(aut.get("name", "") for aut in self.book_info.get("authors", [])),
navmap
)
def create_epub(self):
open(os.path.join(self.BOOK_PATH, "mimetype"), "w").write("application/epub+zip")
meta_info = os.path.join(self.BOOK_PATH, "META-INF")
if os.path.isdir(meta_info):
self.display.log("META-INF directory already exists: %s" % meta_info)
else:
os.makedirs(meta_info)
open(os.path.join(meta_info, "container.xml"), "wb").write(
self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"), "wb").write(
self.create_content_opf().encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"), "wb").write(
self.create_toc().encode("utf-8", "xmlcharrefreplace")
)
zip_file = os.path.join(PATH, "Books", self.book_id)
if os.path.isfile(zip_file + ".zip"):
os.remove(zip_file + ".zip")
shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
os.rename(zip_file + ".zip", os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
# MAIN
if __name__ == "__main__":
arguments = argparse.ArgumentParser(prog="safaribooks.py",
description="Download and generate an EPUB of your favorite books"
" from Safari Books Online.",
add_help=False,
allow_abbrev=False)
login_arg_group = arguments.add_mutually_exclusive_group()
login_arg_group.add_argument(
"--cred", metavar="<EMAIL:PASS>", default=False,
help="Credentials used to perform the auth login on Safari Books Online."
" Es. ` --cred \"account_mail@mail.com:password01\" `."
)
login_arg_group.add_argument(
"--login", action='store_true',
help="Prompt for credentials used to perform the auth login on Safari Books Online."
)
arguments.add_argument(
"--no-cookies", dest="no_cookies", action='store_true',
help="Prevent your session data to be saved into `cookies.json` file."
)
arguments.add_argument(
"--kindle", dest="kindle", action='store_true',
help="Add some CSS rules that block overflow on `table` and `pre` elements."
" Use this option if you're going to export the EPUB to E-Readers like Amazon Kindle."
)
arguments.add_argument(
"--preserve-log", dest="log", action='store_true', help="Leave the `info_XXXXXXXXXXXXX.log`"
" file even if there isn't any error."
)
arguments.add_argument("--help", action="help", default=argparse.SUPPRESS, help='Show this help message.')
arguments.add_argument(
"bookid", metavar='<BOOK ID>',
help="Book digits ID that you want to download. You can find it in the URL (X-es):"
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
)
args_parsed = arguments.parse_args()
if args_parsed.cred or args_parsed.login:
user_email = ""
pre_cred = ""
if args_parsed.cred:
pre_cred = args_parsed.cred
else:
user_email = input("Email: ")
passwd = getpass.getpass("Password: ")
pre_cred = user_email + ":" + passwd
parsed_cred = SafariBooks.parse_cred(pre_cred)
if not parsed_cred:
arguments.error("invalid credential: %s" % (
args_parsed.cred if args_parsed.cred else (user_email + ":*******")
))
args_parsed.cred = parsed_cred
else:
if args_parsed.no_cookies:
arguments.error("invalid option: `--no-cookies` is valid only if you use the `--cred` option")
SafariBooks(args_parsed)
# Hint: if you want to download more than one book at once, initialize more than one instance of `SafariBooks`...
sys.exit(0)
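# Hedged usage sketch (not part of the original script): typical invocations,
# where XXXXXXXXXXXXX stands for the digits taken from the book's URL and the
# credentials are purely illustrative.
#
#   python3 safaribooks.py --cred "account_mail@mail.com:password01" XXXXXXXXXXXXX
#   python3 safaribooks.py --kindle XXXXXXXXXXXXX   # reuses the saved cookies.json from a previous login
#
# Following the hint above, several books could be grabbed in a single run by
# reusing the parsed arguments (hypothetical IDs):
#
#   for one_id in ("XXXXXXXXXXXXX", "YYYYYYYYYYYYY"):
#       args_parsed.bookid = one_id
#       SafariBooks(args_parsed)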
|
camera.py
|
"""camera.py
This code implements the Camera class, which encapsulates code to
handle IP CAM, USB webcam or the Jetson onboard camera. In
addition, this Camera class is further extended to take a video
file or an image file as input.
"""
import logging
import threading
import subprocess
import numpy as np
import cv2
# The following flag is used to control whether to use a GStreamer
# pipeline to open the USB webcam source. If set to False, we just open
# the webcam using the cv2.VideoCapture(index) machinery, i.e. relying
# on cv2's built-in function to capture images from the webcam.
USB_GSTREAMER = True
def add_camera_args(parser):
"""Add parser augument for camera options."""
parser.add_argument('--image', type=str, default=None,
help='image file name, e.g. dog.jpg')
parser.add_argument('--video', type=str, default=None,
help='video file name, e.g. traffic.mp4')
parser.add_argument('--video_looping', action='store_true',
help='loop around the video file [False]')
parser.add_argument('--rtsp', type=str, default=None,
help=('RTSP H.264 stream, e.g. '
'rtsp://admin:123456@192.168.1.64:554'))
parser.add_argument('--rtsp_latency', type=int, default=200,
help='RTSP latency in ms [200]')
parser.add_argument('--usb', type=int, default=None,
help='USB webcam device id (/dev/video?) [None]')
parser.add_argument('--gstr', type=str, default=None,
help='GStreamer string [None]')
parser.add_argument('--onboard', type=int, default=None,
help='Jetson onboard camera [None]')
parser.add_argument('--copy_frame', action='store_true',
help=('copy video frame internally [False]'))
parser.add_argument('--do_resize', action='store_true',
help=('resize image/video [False]'))
parser.add_argument('--width', type=int, default=640,
help='image width [640]')
parser.add_argument('--height', type=int, default=480,
help='image height [480]')
return parser
def open_cam_rtsp(uri, width, height, latency):
"""Open an RTSP URI (IP CAM)."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'omxh264dec' in gst_elements:
# Use hardware H.264 decoder on Jetson platforms
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! videoconvert ! '
'appsink').format(uri, latency, width, height)
elif 'avdec_h264' in gst_elements:
# Otherwise try to use the software decoder 'avdec_h264'
# NOTE: in case resizing images is necessary, try adding
# a 'videoscale' into the pipeline
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! avdec_h264 ! '
'videoconvert ! appsink').format(uri, latency)
else:
raise RuntimeError('H.264 decoder not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
"""Open a USB webcam."""
if USB_GSTREAMER:
gst_str = ('v4l2src device=/dev/video{} ! '
'video/x-raw, width=(int){}, height=(int){} ! '
'videoconvert ! appsink').format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
else:
return cv2.VideoCapture(dev)
def open_cam_gstr(gstr, width, height):
"""Open camera using a GStreamer string.
Example:
gstr = 'v4l2src device=/dev/video0 ! video/x-raw, width=(int){width}, height=(int){height} ! videoconvert ! appsink'
"""
gst_str = gstr.format(width=width, height=height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
"""Open the Jetson onboard camera."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvcamerasrc' in gst_elements:
# On versions of L4T prior to 28.1, you might need to add
# 'flip-method=2' into gst_str below.
gst_str = ('nvcamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)2592, height=(int)1458, '
'format=(string)I420, framerate=(fraction)30/1 ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
elif 'nvarguscamerasrc' in gst_elements:
gst_str = ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)1920, height=(int)1080, '
'format=(string)NV12, framerate=(fraction)30/1 ! '
'nvvidconv flip-method=2 ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
else:
raise RuntimeError('onboard camera source not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
"""This 'grab_img' function is designed to be run in the sub-thread.
Once started, this thread continues to grab a new image and put it
into the global 'img_handle', until 'thread_running' is set to False.
"""
while cam.thread_running:
_, cam.img_handle = cam.cap.read()
# Flip vertically (correct orientation), kept disabled:
# cam.img_handle = cv2.flip(cam.img_handle, 0)
if cam.img_handle is None:
#logging.warning('Camera: cap.read() returns None...')
break
cam.thread_running = False
class Camera():
"""Camera class which supports reading images from theses video sources:
1. Image (jpg, png, etc.) file, repeating indefinitely
2. Video file
3. RTSP (IP CAM)
4. USB webcam
5. Jetson onboard camera
"""
def __init__(self, args):
self.args = args
self.is_opened = False
self.video_file = ''
self.video_looping = args.video_looping
self.thread_running = False
self.img_handle = None
self.copy_frame = args.copy_frame
self.do_resize = args.do_resize
self.img_width = args.width
self.img_height = args.height
self.cap = None
self.thread = None
self._open() # try to open the camera
def _open(self):
"""Open camera based on command line arguments."""
if self.cap is not None:
raise RuntimeError('camera is already opened!')
a = self.args
if a.image:
logging.info('Camera: using an image file %s' % a.image)
self.cap = 'image'
self.img_handle = cv2.imread(a.image)
if self.img_handle is not None:
if self.do_resize:
self.img_handle = cv2.resize(
self.img_handle, (a.width, a.height))
self.is_opened = True
self.img_height, self.img_width, _ = self.img_handle.shape
elif a.video:
logging.info('Camera: using a video file %s' % a.video)
self.video_file = a.video
self.cap = cv2.VideoCapture(a.video)
self._start()
elif a.rtsp:
logging.info('Camera: using RTSP stream %s' % a.rtsp)
self.cap = open_cam_rtsp(a.rtsp, a.width, a.height, a.rtsp_latency)
self._start()
elif a.usb is not None:
logging.info('Camera: using USB webcam /dev/video%d' % a.usb)
self.cap = open_cam_usb(a.usb, a.width, a.height)
self._start()
elif a.gstr is not None:
logging.info('Camera: using GStreamer string "%s"' % a.gstr)
self.cap = open_cam_gstr(a.gstr, a.width, a.height)
self._start()
elif a.onboard is not None:
logging.info('Camera: using Jetson onboard camera')
self.cap = open_cam_onboard(a.width, a.height)
self._start()
else:
raise RuntimeError('no camera type specified!')
def isOpened(self):
return self.is_opened
def _start(self):
if not self.cap.isOpened():
logging.warning('Camera: starting while cap is not opened!')
return
# Try to grab the 1st image and determine width and height
_, self.img_handle = self.cap.read()
# Flip (disabled):
# self.img_handle = cv2.rotate(self.img_handle, cv2.ROTATE_180)
# self.img_handle = cv2.flip(self.img_handle, 0)
if self.img_handle is None:
logging.warning('Camera: cap.read() returns no image!')
self.is_opened = False
return
self.is_opened = True
if self.video_file:
if not self.do_resize:
self.img_height, self.img_width, _ = self.img_handle.shape
else:
self.img_height, self.img_width, _ = self.img_handle.shape
# start the child thread if not using a video file source
# i.e. rtsp, usb or onboard
assert not self.thread_running
self.thread_running = True
self.thread = threading.Thread(target=grab_img, args=(self,))
self.thread.start()
def _stop(self):
if self.thread_running:
self.thread_running = False
#self.thread.join()
def read(self):
"""Read a frame from the camera object.
Returns None if the camera runs out of images or an error occurs.
"""
if not self.is_opened:
return None
if self.video_file:
_, img = self.cap.read()
if img is None:
logging.info('Camera: reaching end of video file')
if self.video_looping:
self.cap.release()
self.cap = cv2.VideoCapture(self.video_file)
_, img = self.cap.read()
if img is not None and self.do_resize:
img = cv2.resize(img, (self.img_width, self.img_height))
return img
elif self.cap == 'image':
return np.copy(self.img_handle)
else:
if self.copy_frame:
return self.img_handle.copy()
else:
return self.img_handle
# def saveImg(self):
# _, img = self.cap.read
def release(self):
self._stop()
try:
self.cap.release()
except:
pass
self.is_opened = False
def __del__(self):
self.release()
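# Hedged usage sketch (not part of the original module): how this Camera class
# is typically wired together with `add_camera_args` and an argparse parser.
# The helper name, window title and the `--usb 0` example are illustrative assumptions.
def _demo_camera_loop():
    """Grab frames until ESC is pressed, then release the camera."""
    import argparse
    parser = add_camera_args(argparse.ArgumentParser())
    args = parser.parse_args()          # e.g. pass `--usb 0` on the command line
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    try:
        while True:
            img = cam.read()            # None means the source ran out of frames
            if img is None:
                break
            cv2.imshow('Camera demo', img)
            if cv2.waitKey(1) == 27:    # ESC
                break
    finally:
        cam.release()
        cv2.destroyAllWindows()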
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
save_restore_warnings_filters
)
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
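# Hedged illustration (not part of the original test suite): the
# "for proto in protocols:" outer loop described above, written out as a
# stand-alone round-trip check. The helper name is hypothetical.
def _roundtrip_all_protocols(obj):
    """Pickle and unpickle `obj` under every supported protocol."""
    for proto in protocols:
        assert pickle.loads(pickle.dumps(obj, proto)) == obj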
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
def identity(x):
return x
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
# Simple mutable object.
class Object:
pass
# Hashable immutable key object containing unhashable mutable data.
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3; we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
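# DATA0..DATA4 above are reference pickles of exactly the object graph built
# by create_data(); the test_load_from_data* methods below assert this.  A
# minimal standalone check (hypothetical helper, not used by the tests;
# assumes the standard pickle module and the module-level class C referenced
# by the test data):
def _check_reference_pickles():
    """Sanity-check that every DATA* constant unpickles to create_data() (sketch)."""
    import pickle
    expected = create_data()
    for blob in (DATA0, DATA1, DATA2, DATA3, DATA4):
        assert pickle.loads(blob) == expected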
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of an old-style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(KeyError, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_reduce(self):
self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
self.check_unpickling_error(TypeError, b'N)R.')
self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
def test_bad_newobj(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
def test_bad_newobj_ex(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# there could be a delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
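# AbstractUnpickleTests is meant to be mixed into a concrete test class that
# supplies loads(); consumers such as test_pickle define their own variants.
# A minimal, hypothetical wiring against pickle.loads could look like this
# (sketch only, not collected by the test runner):
def _example_concrete_unpickle_tests():
    """Return a throwaway concrete subclass bound to pickle.loads (sketch)."""
    import pickle
    class ExampleUnpicklerTests(AbstractUnpickleTests):
        def loads(self, buf, **kwds):
            return pickle.loads(buf, **kwds)
    return ExampleUnpicklerTests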
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
# List containing itself.
l = cls()
l.append(l)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
def test_recursive_list(self):
self._test_recursive_list(list)
def test_recursive_list_subclass(self):
self._test_recursive_list(MyList, minprotocol=2)
def test_recursive_list_like(self):
self._test_recursive_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
# Tuple containing a list containing the original tuple.
t = (cls(),)
t[0].append(t)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = aslist(x[0])
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
# List containing a tuple containing the original list.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(y[0], tuple)
self.assertEqual(len(y[0]), 1)
self.assertIs(y[0][0], x)
def test_recursive_tuple_and_list(self):
self._test_recursive_tuple_and_list(list)
def test_recursive_tuple_and_list_subclass(self):
self._test_recursive_tuple_and_list(MyList, minprotocol=2)
def test_recursive_tuple_and_list_like(self):
self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
# Dict containing itself.
d = cls()
d[1] = d
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y.keys()), [1])
self.assertIs(y[1], x)
def test_recursive_dict(self):
self._test_recursive_dict(dict)
def test_recursive_dict_subclass(self):
self._test_recursive_dict(MyDict, minprotocol=2)
def test_recursive_dict_like(self):
self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing the original tuple.
t = (cls(),)
t[0][1] = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(list(y), [1])
self.assertIs(y[1], x)
# Dict containing a tuple containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y), [1])
self.assertIsInstance(y[1], tuple)
self.assertEqual(len(y[1]), 1)
self.assertIs(y[1][0], x)
def test_recursive_tuple_and_dict(self):
self._test_recursive_tuple_and_dict(dict)
def test_recursive_tuple_and_dict_subclass(self):
self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like(self):
self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
# Dict containing an immutable object (as key) containing the original
# dict.
d = cls()
d[K(d)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y.keys()), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
def test_recursive_dict_key(self):
self._test_recursive_dict_key(dict)
def test_recursive_dict_subclass_key(self):
self._test_recursive_dict_key(MyDict, minprotocol=2)
def test_recursive_dict_like_key(self):
self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing an immutable object (as key)
# containing the original tuple.
t = (cls(),)
t[0][K(t)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
# Dict containing an immutable object (as key) containing a tuple
# containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value[0], x)
def test_recursive_tuple_and_dict_key(self):
self._test_recursive_tuple_and_dict_key(dict)
def test_recursive_tuple_and_dict_subclass_key(self):
self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like_key(self):
self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
# Set containing an immutable object containing the original set.
y = set()
y.add(K(y))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
# Immutable object containing a set containing the original object.
y, = y
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, K)
self.assertIsInstance(x.value, set)
self.assertEqual(len(x.value), 1)
self.assertIs(list(x.value)[0], x)
def test_recursive_inst(self):
# Mutable object containing itself.
i = Object()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, Object)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = Object()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
# Mutable object containing a collection containing the original
# object.
o = Object()
o.attr = factory([o])
t = type(o.attr)
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x.attr, t)
self.assertEqual(len(x.attr), 1)
self.assertIsInstance(list(x.attr)[0], Object)
self.assertIs(list(x.attr)[0], x)
# Collection containing a mutable object containing the original
# collection.
o = o.attr
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x, t)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], Object)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self._test_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self._test_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self._test_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self._test_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self._test_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
# Mutable object containing itself.
y = REX_state()
y.state = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIs(x.state, x)
def test_recursive_tuple_and_inst_state(self):
# Tuple containing a mutable object containing the original tuple.
t = (REX_state(),)
t[0].state = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], REX_state)
self.assertIs(x[0].state, x)
# Mutable object containing a tuple containing the object.
t, = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIsInstance(x.state, tuple)
self.assertEqual(len(x.state), 1)
self.assertIs(x.state[0], x)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# so we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted in a locale-independent way with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# The Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
# Test compactness of the int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
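# These two constants appear to mirror the framing parameters of the pickle
# module's internal _Framer (a 4-byte minimum and a 64 KiB target frame
# size), so the framing assertions below track the pickler's own policy.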
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
# A single frame for small objects between
# first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
# Large byte objects (dict values) interleaved with small objects
# (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while the Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
# be released; otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
# but not too many either: there can be one for the proto,
# one per-frame header, one per frame for the actual contents,
# and two for the header.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
# There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when looking up
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
# Yield buffer-like objects with the bytestring "abcdef" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
# Return a true value from buffer_callback should have
# the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
# Non iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1-byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
class REX_state(object):
"""This class is used to check the 3th argument (state) of
the reduce protocol.
"""
def __init__(self, state=None):
self.state = state
def __eq__(self, other):
return type(self) is type(other) and self.state == other.state
def __setstate__(self, state):
self.state = state
def __reduce__(self):
return type(self), (), self.state
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
Such a callable may be created by people other than the ones who created the
BBB class. If passed as the state_setter item of a custom reducer, it
allows for custom state-setting behavior of BBB objects. One can think of
it as the analogue of list_setitems or dict_setitems, but for foreign
classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case, the callback returns an invalid result (not a 2-5
# tuple or a string), the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
# math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, which could prevent all serialized objects
# from being garbage-collected without explicitly invoking gc.collect().
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects' reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
test_crt_basic_vm_with_max_threads.py
|
'''
New perf test for creating KVM VMs with a basic L3 network.
The number of VMs created depends on the environment variable ZSTACK_TEST_NUM.
The difference from test_basic_l3_vm_with_given_num.py is that this case's maximum thread count is 1000.
@author: Youyk
'''
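# Illustrative usage sketch (editor's addition, not part of the original test): the
# case is driven by environment variables that are read further down in this module;
# a harness could set them before running test(). The concrete values are examples only.
#
#   import os
#   os.environ['ZSTACK_TEST_NUM'] = '100'            # number of VMs to create
#   os.environ['ZSTACK_THREAD_THRESHOLD'] = '1000'   # max concurrent creation threads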
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
import sys
import threading
import random
session_uuid = None
session_to = None
session_mc = None
thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
if not thread_threshold:
thread_threshold = 1000
else:
thread_threshold = int(thread_threshold)
exc_info = []
def check_thread_exception():
if exc_info:
# Re-raise the first exception captured in a worker thread, preserving its traceback.
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1.with_traceback(info2)
def create_vm(vm):
try:
vm.create()
except:
exc_info.append(sys.exc_info())
def test():
global session_uuid
global session_to
global session_mc
vm_num = os.environ.get('ZSTACK_TEST_NUM')
if not vm_num:
vm_num = 20
else:
vm_num = int(vm_num)
test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
org_num = vm_num
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3PublicNetworkName')
l3 = test_lib.lib_get_l3_by_name(l3_name)
l3s = test_lib.lib_get_l3s()
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
# Log in first so a valid session uuid is available, then raise the account
# session timeout and max concurrent session limits for the bulk creation.
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
vm_creation_option.set_session_uuid(session_uuid)
vm = test_vm_header.ZstackTestVm()
random_name = random.random()
vm_name = 'multihost_basic_vm_%s' % str(random_name)
vm_creation_option.set_name(vm_name)
while vm_num > 0:
check_thread_exception()
vm_creation_option.set_l3_uuids([l3.uuid])
vm.set_creation_option(vm_creation_option)
vm_num -= 1
thread = threading.Thread(target=create_vm, args=(vm,))
while threading.active_count() > thread_threshold:
time.sleep(1)
thread.start()
while threading.active_count() > 1:
time.sleep(0.01)
cond = res_ops.gen_query_conditions('name', '=', vm_name)
vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
acc_ops.logout(session_uuid)
if vms == org_num:
test_util.test_pass('Create %d VMs Test Success' % org_num)
else:
test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
#Will be called only if exception happens in test().
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
if session_uuid:
acc_ops.logout(session_uuid)
|
exemple_run_threading.py
|
import threading
import time
from flask import Flask
app = Flask(__name__)
x = 0
def run_job():
global x
while True:
print("Run recurring task")
x += 1
time.sleep(3)
@app.route("/")
def hello():
return "Hello World!" + str(x)
if __name__ == "__main__":
thread = threading.Thread(target=run_job)
thread.start()
app.run()
|
test_partition_20.py
|
import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
""" Test case of partition interface in parameters"""
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default(self):
"""
target: verify create a partition
method: 1. create a partition
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
description = cf.gen_unique_str("desc_")
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [""])
def test_partition_empty_name(self, partition_name):
"""
target: verify create a partition with empty name
method: 1. create a partition with empty name
expected: 1. raise exception
"""
# create a collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_empty_description(self):
"""
target: verify create a partition with empty description
method: 1. create a partition with empty description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_unique_str(prefix)
description = ""
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_max_description_length(self):
"""
target: verify create a partition with a 255-character name and a 2048-character description
method: 1. create a partition with a 255-character name and a 2048-character description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_str_by_length(255)
description = cf.gen_str_by_length(2048)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True}
)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_dup_name(self):
"""
target: verify create partitions with duplicate names
method: 1. create partitions with duplicate names
expected: 1. create successfully
2. the same partition returned with diff object ids
"""
# create a collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_name = cf.gen_unique_str(prefix)
description = cf.gen_unique_str()
partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
# public check func to be extracted
assert id(partition_w1.partition) != id(partition_w2.partition)
assert partition_w1.name == partition_w2.name
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
def test_partition_special_chars_description(self, description):
"""
target: verify create a partition with special characters in description
method: 1. create a partition with special characters in description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default_name(self):
"""
target: verify create a partition with default name
method: 1. get the _default partition
2. create a partition with _default name
expected: 1. the same partition returned
"""
# create collection
collection_w = self.init_collection_wrap()
# check that the default partition exists
assert collection_w.has_partition(ct.default_partition_name)[0]
# check that can get the _default partition
collection, _ = collection_w.partition(ct.default_partition_name)
# check that init the _default partition object
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert collection.name == partition_w.name
@pytest.mark.tags(CaseLabel.L1)
def test_partition_max_length_name(self):
"""
target: verify create a partition with max length(256) name
method: 1. create a partition with max length name
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_str_by_length(256)
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: 1. create a partition with invalid names
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
# TODO: need an error code issue #5144 and assert independently
@pytest.mark.tags(CaseLabel.L1)
def test_partition_none_collection(self):
"""
target: verify create a partition with none collection
method: 1. create a partition with none collection
expected: 1. raise exception
"""
# create partition with collection is None
partition_name = cf.gen_unique_str(prefix)
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus.Collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop(self):
"""
target: verify drop a partition in one collection
method: 1. create a partition in one collection
2. drop the partition
expected: 1. drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
# check that the partition exists
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
# check that the partition not exists
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release(self):
"""
target: verify release partition
method: 1. create a collection and two partitions
2. insert data into each partition
3. flush and load the both partitions
4. release partition1
5. release partition1 twice
expected: 1. the 1st partition is released
2. the 2nd partition is not released
"""
# create collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
# insert data to two partition
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
# load two partitions
partition_w1.load()
partition_w2.load()
# search two partitions
search_vectors = cf.gen_vectors(1, ct.default_dim)
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res1) == 1 and len(res2) == 1
# release the first partition
partition_w1.release()
# check result
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res2) == 1
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
cf.gen_default_list_data(10),
cf.gen_default_tuple_data(10)])
def test_partition_insert(self, data):
"""
target: verify insert multi entities
method: 1. create a collection and a partition
2. partition.insert(data)
3. insert data again
expected: 1. insert data successfully
"""
nums = 10
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name,
"is_empty": True, "num_entities": 0}
)
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name]) # don't need flush for issue #5737
assert not partition_w.is_empty
assert partition_w.num_entities == nums
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert not partition_w.is_empty
assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
""" Test case of partition interface in operations """
@pytest.mark.tags(CaseLabel.L1)
def test_partition_dropped_collection(self):
"""
target: verify create partition against a dropped collection
method: 1. create collection1
2. drop collection1
3. create partition in collection1
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# drop collection
collection_w.drop()
# create partition failed
self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_same_name_in_diff_collections(self):
"""
target: verify create partitions with same name in diff collections
method: 1. create a partition in collection1
2. create a partition in collection2
expected: 1. create successfully
"""
# create two collections
collection_w1 = self.init_collection_wrap()
collection_w2 = self.init_collection_wrap()
# create 2 partitions in 2 diff collections
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
# check result
assert collection_w1.has_partition(partition_name)[0]
assert collection_w2.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_multi_partitions_in_collection(self):
"""
target: verify create multiple partitions in one collection
method: 1. create multiple partitions in one collection
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
for _ in range(10):
partition_name = cf.gen_unique_str(prefix)
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="skip temporarily for debug")
def test_partition_maximum_partitions(self):
"""
target: verify create maximum partitions
method: 1. create maximum partitions
2. create one more partition
expected: 1. raise exception
"""
threads_num = 8
threads = []
def create_partition(collection, threads_n):
for _ in range(ct.max_partition_num // threads_n):
name = cf.gen_unique_str(prefix)
par_wrap = ApiPartitionWrapper()
par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)
collection_w = self.init_collection_wrap()
for _ in range(threads_num):
t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
p_name = cf.gen_unique_str()
self.partition_wrap.init_partition(
collection_w.collection, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "maximum partition's number should be limit to 4096"})
@pytest.mark.tags(CaseLabel.L0)
def test_partition_drop_default_partition(self):
"""
target: verify drop the _default partition
method: drop the _default partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
default_partition, _ = collection_w.partition(ct.default_partition_name)
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert default_partition.name == partition_w.name
# verify that drop partition with error
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop_partition_twice(self):
"""
target: verify drop the same partition twice
method: 1.create a partition with default schema
2. drop the partition
3. drop the same partition again
expected: raise exception when 2nd time
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
collection_w.has_partition(partition_name)
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
# verify that drop the partition again with exception
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_create_and_drop_multi_times(self):
"""
target: verify create and drop a partition multiple times
method: 1.create a partition with default schema
2. drop the partition
3. loop #1 and #2 for 5 times
expected: create and drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# range for 5 times
partition_name = cf.gen_unique_str(prefix)
for i in range(5):
# create partition and check that the partition exists
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop partition and check that the partition not exists
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_drop_non_empty_partition(self):
"""
target: verify drop a partition which has data inserted
method: 1.create a partition with default schema
2. insert some data
3. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data())
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_drop_indexed_partition(self, data, index_param):
"""
target: verify drop an indexed partition
method: 1.create a partition
2. insert some data
3. create an index
4. flush or not flush (remove flush step for issue # 5837)
5. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# # flush
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_empty_partition(self):
"""
target: verify release an empty partition
method: 1.create a partition
2. release the partition
expected: release successfully
"""
# create partition
partition_w = self.init_partition_wrap()
assert partition_w.is_empty
# release partition
partition_w.release()
# TODO: assert no more memory consumed
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_dropped_partition(self):
"""
target: verify release a dropped partition
method: 1.create a partition
2. drop the partition
3. release the partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# release the dropped partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_dropped_collection(self):
"""
target: verify release a dropped collection
method: 1.create a collection and partition
2. drop the collection
3. release the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# release the partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_after_collection_released(self):
"""
target: verify release a partition after the collection released
method: 1.create a collection and partition
2. insert some data
3. release the collection
4. release the partition
expected: partition released successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
data = cf.gen_default_list_data()
partition_w.insert(data)
assert partition_w.num_entities == len(data[0])
assert collection_w.num_entities == len(data[0])
# load partition
partition_w.load()
# search of partition
search_vectors = cf.gen_vectors(1, ct.default_dim)
res_1, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res_1) == 1
# release collection
collection_w.release()
# search of partition
res_2, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 0,
ct.err_msg: "not loaded into memory"})
# release partition
partition_w.release()
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_default_partition(self):
"""
target: verify insert data into _default partition
method: 1.create a collection
2. insert some data into _default partition
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
partition_name = ct.default_partition_name
assert collection_w.has_partition(partition_name)[0]
partition_w = self.init_partition_wrap(collection_w, partition_name)
# insert data to partition
data = cf.gen_default_dataframe_data()
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == len(data)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_partition(self):
"""
target: verify insert data into a dropped partition
method: 1.create a collection
2. insert some data into a dropped partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
# TODO: update the assert error
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_collection(self):
"""
target: verify insert data into a dropped collection
method: 1.create a collection
2. insert some data into a dropped collection
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "None Type"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_insert_maximum_size_data(self):
"""
target: verify insert the maximum size of data (256M?) at a time
method: 1.create a partition
2. insert maximum size data
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w)
# insert data to partition
max_size = 100000 # TODO: clarify the max size of data
ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size), timeout=40)
assert len(ins_res.primary_keys) == max_size
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == max_size
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
def test_partition_insert_mismatched_dimensions(self, dim):
"""
target: verify insert entities with mismatched dimensions
method: 1. create a collection with the default dim
2. insert data with a mismatched dim
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
data = cf.gen_default_list_data(nb=10, dim=dim)
# insert data to partition
partition_w.insert(data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
def test_partition_insert_sync(self, sync):
"""
target: verify insert sync
method: 1.create a partition
2. insert data in sync
expected: insert successfully
"""
pass
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
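# Editor's comment (hedged usage sketch, not part of the original module): proxies and
# managers call dispatch() on an already-authenticated connection, for example
#
#   conn = Client(address, authkey=authkey)            # Client comes from .connection
#   exposed = dispatch(conn, None, 'get_methods', (token,))
#
# where 'get_methods' is one of the names listed in Server.public below.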
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
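# Wire protocol used throughout this module: a client sends a 4-tuple
# (ident, methodname, args, kwds) over the connection and the server replies
# with a 2-tuple (kind, result), where kind is one of '#RETURN', '#PROXY',
# '#ERROR', '#TRACEBACK' or '#UNSERIALIZABLE'.  dispatch() above unwraps
# '#RETURN' and converts the error kinds back into exceptions; '#PROXY'
# replies are handled separately in BaseProxy._callmethod() further down.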
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError as second_ke:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which gets cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
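# For example, MakeProxyType('CounterProxy', ('increment', 'value')) returns a
# BaseProxy subclass whose increment()/value() methods simply forward to
# self._callmethod('increment', ...) / self._callmethod('value', ...); AutoProxy
# below uses exactly this to build a proxy type from the exposed-method list it
# fetches from the server.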
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
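# A minimal usage sketch of the machinery defined above (a guarded illustration;
# in practice this lives in user code, since this module is imported as
# multiprocessing.managers): the stock multiprocessing.Manager() helper returns
# a started SyncManager, and the proxies it hands out forward their method calls
# through BaseProxy._callmethod() to Server.serve_client() in the manager process.
if __name__ == '__main__':
    from multiprocessing import Manager
    with Manager() as manager:
        shared_list = manager.list()      # ListProxy for a list living in the server process
        shared_list.extend([1, 2, 3])
        shared_dict = manager.dict(a=1)   # DictProxy
        print(list(shared_list), dict(shared_dict))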
|
dataengine_configure.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import traceback
import os
import uuid
import logging
from Crypto.PublicKey import RSA
import multiprocessing
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
        print("[INSTALLING USER'S KEY ON SLAVE]")
        logging.info("[INSTALLING USER'S KEY ON SLAVE]")
additional_config = {"user_keyname": os.environ['edge_user_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'],
data_engine['master_node_name'])
append_result("Failed to install user ssh key on slave.", str(err))
sys.exit(1)
try:
        logging.info('[CLEANING INSTANCE FOR SLAVE NODE]')
        print('[CLEANING INSTANCE FOR SLAVE NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(slave_hostname, keyfile_name, data_engine['dlab_ssh_user'], os.environ['application'])
try:
local("~/scripts/{}.py {}".format('common_clean_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to clean slave instance..", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
        print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {}". \
format(slave_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
        logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
        print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed configuring slave node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to configure slave node.", str(err))
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
print('Generating infrastructure names and tags')
data_engine = dict()
try:
data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
data_engine['exploratory_name'] = ''
try:
data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
data_engine['region'] = os.environ['azure_region']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['vpc_name'] = os.environ['azure_vpc_name']
data_engine['user_name'] = os.environ['edge_user_name'].replace('_', '-')
data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
data_engine['user_name'])
data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
data_engine['vpc_name'],
data_engine['private_subnet_name']).address_prefix
data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(data_engine['service_base_name'],
data_engine['user_name'])
data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
data_engine['user_name'])
data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
data_engine['user_name'],
data_engine['exploratory_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
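        # For illustration (hypothetical values): with service_base_name='dlab',
        # user_name='user1', exploratory_name='exp1' and computational_name='comp1',
        # the names built above become cluster 'dlab-user1-de-exp1-comp1', master
        # 'dlab-user1-de-exp1-comp1-m' and slaves 'dlab-user1-de-exp1-comp1-s1', '...-s2', etc.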
data_engine['master_network_interface_name'] = '{}-nif'.format(data_engine['master_node_name'])
data_engine['master_size'] = os.environ['azure_dataengine_master_size']
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
master_node_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
data_engine['master_node_name'])
edge_instance_name = '{}-{}-edge'.format(data_engine['service_base_name'], data_engine['user_name'])
edge_instance_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
edge_instance_name)
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
key = RSA.importKey(open(keyfile_name, 'rb').read())
data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
print("Failed to generate variables dictionary.")
append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
print('[CREATING DLAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
        print("[INSTALLING USER'S KEY ON MASTER]")
        logging.info("[INSTALLING USER'S KEY ON MASTER]")
additional_config = {"user_keyname": os.environ['edge_user_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to install ssh user key on master.", str(err))
sys.exit(1)
try:
logging.info('[CLEANING INSTANCE FOR MASTER NODE]')
print('[CLEANING INSTANCE FOR MASTER NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(master_node_hostname, keyfile_name, data_engine['dlab_ssh_user'], os.environ['application'])
try:
local("~/scripts/{}.py {}".format('common_clean_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to clean master instance.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
        print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(master_node_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to configure master node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
sys.exit(1)
try:
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": data_engine['master_node_name'],
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
boomcpu_no_colours.py
|
#boomcpu.py Random Bitcoin&Litecoin Legacy compressed/uncompresses address and Segwit address P2SH.Bitcoin Gold\BitcoinCash\Zcash\Doge\DASH\ETH\ZEN\ZEIT\TENT
# Look for address or HASH160 PUBLIC KEY
#One of the best and most versatile crypto scanner. Looks for 28 different addresses, HASH160 or PUBLICKEY from a txt file. Cloud not have made this without Iceland's original.
#Made by mizogg.co.uk
# Donations 3M6L77jC3jNejsd5ZU1CVpUVngrhanb6cD
import bit
from bit import *
from bit.format import bytes_to_wif
import hashlib
from bitcoinlib.encoding import pubkeyhash_to_addr_bech32, addr_bech32_to_pubkeyhash, change_base
from eth_hash.auto import keccak
import random
import threading
print("boomcpu.py Random Bitcoin&Litecoin Legacy compressed/uncompresses address and Segwit address P2SH.Bitcoin Gold\BitcoinCash\Zcash\Doge\DASH\ETH\ZEN\ZEIT\TENT List loading Good Luck...")
filename ='mixed.txt'
with open(filename) as f:
    line_count = 0
    for line in f:
        if line != "\n":
            line_count += 1
with open(filename) as file:
add = file.read().split()
add = set(add)
print("\nboomcpu.py---" + "---------mizogg.co.uk---------" + "---boomcpu.py" )
def ETH_Address(un_pubk_bytes):
return '0x' + keccak(un_pubk_bytes[1:])[-20:].hex()
def HASH160(pubk_bytes):
return hashlib.new('ripemd160', hashlib.sha256(pubk_bytes).digest() ).digest()
def hash160_to_addrbech32(hash160):
return pubkeyhash_to_addr_bech32(hash160, prefix='bc', witver=0, separator='1')
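# Derivation chain used below: private key -> secp256k1 public key (compressed
# and uncompressed) -> HASH160 = RIPEMD160(SHA256(pubkey)) -> address, where the
# address is either base58check(version_byte + hash160) for the legacy formats
# or a bech32 encoding of the hash160 for the p2wpkh formats; the ETH address is
# the last 20 bytes of keccak256 of the uncompressed public key without its
# leading 0x04 byte.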
print('Total Addresses\HASH160\PUBLICKEYS Loaded and Checking : ',str (line_count))
threadCount = input('How many threads to run?: ')
print("Start search... Pick Range to start (Example Puzzle 64 starting Range 18446744073709551615 ):")
x=int(input("'start range Min 1-115792089237316195423570985008687907852837564279074904382605163141518161494335 -> "))
y=int(input("stop range Max 115792089237316195423570985008687907852837564279074904382605163141518161494336 -> "))
print("Starting search... Please Wait ")
print("==========================================================")
def seek():
count=0
totalkey=0
total=0
while True:
ran= random.randint(x,y)
pk = Key.from_int(ran)
key = Key.from_int(ran)
upub = pk._pk.public_key.format(compressed=False)
cpub = pk._pk.public_key.format(compressed=True)
crmd = HASH160(cpub)
urmd = HASH160(upub)
caddr160 =crmd.hex() # Hash 160
uaddr160 =urmd.hex() # Hash 160
caddrpub = cpub.hex() #Public Keys
uaddrpub = upub.hex() #Public Keys
cwif = bytes_to_wif(key.to_bytes(), compressed=True) # compressed WIF
uwif = bytes_to_wif(key.to_bytes(), compressed=False) # uncompressed WIF
privatekey = key.to_hex()
key1 = Key(uwif)
caddr = key.address #BTC (compressed)
uaddr = key1.address #BTC (uncompressed)
paddr = key.segwit_address # BTC (P2SH)
bcaddr = pubkeyhash_to_addr_bech32(crmd, prefix='bc', witver=0, separator='1') #BTC (bech32 p2wpkh)
lcaddr = bit.base58.b58encode_check(b'\x30' + crmd) #LTC (compressed)
        luaddr = bit.base58.b58encode_check(b'\x30' + urmd) #LTC (uncompressed, version byte 0x30 as for the compressed form)
lpaddr = bit.base58.b58encode_check(b'\x32' + HASH160(b'\x00\x14' + crmd)) #LTC (P2SH)
lbpaddr = pubkeyhash_to_addr_bech32(crmd, prefix='ltc', witver=0, separator='1') #LTC (bech32 p2wpkh)
btgcaddr = bit.base58.b58encode_check(b'\x26' + crmd) #BTG (compressed)
btguaddr = bit.base58.b58encode_check(b'\x26' + urmd) #BTG (uncompressed)
btgpaddr = bit.base58.b58encode_check(b'\x17' + HASH160(b'\x00\x14' + crmd)) #BTG (P2SH)
btgbpaddr = pubkeyhash_to_addr_bech32(crmd, prefix='btg', witver=0, separator='1') #BTG (bech32 p2wpkh)
btczcaddr = bit.base58.b58encode_check(b'\x1c\xb8' + crmd) #BTCZ (compressed)
btczuaddr = bit.base58.b58encode_check(b'\x1c\xb8' + urmd) #BTCZ (uncompressed)
tentcaddr = bit.base58.b58encode_check(b'\x1c\x28' + crmd) #TENT (compressed)
tentuaddr = bit.base58.b58encode_check(b'\x1c\x28' + urmd) #TENT (uncompressed)
dogecaddr = bit.base58.b58encode_check(b'\x1e' + crmd) #DOGE (compressed)
dogeuaddr = bit.base58.b58encode_check(b'\x1e' + urmd) #DOGE (uncompressed)
dashcaddr = bit.base58.b58encode_check(b'\x4c' + crmd) #DASH (compressed)
dashuaddr = bit.base58.b58encode_check(b'\x4c' + urmd) #DASH (uncompressed)
smartcaddr = bit.base58.b58encode_check(b'\x3f' + crmd) #SMC (compressed)
smartuaddr = bit.base58.b58encode_check(b'\x3f' + urmd) #SMC (uncompressed)
zcashcaddr = bit.base58.b58encode_check(b'\x1c\xb8' + crmd) #ZCash ZLC ZERO (compressed)
zcashuaddr = bit.base58.b58encode_check(b'\x1c\xb8' + urmd) #ZCash ZLC ZERO (uncompressed)
zencaddr = bit.base58.b58encode_check(b'\x20\x89' + crmd) #ZEN (compressed)
zenuaddr = bit.base58.b58encode_check(b'\x20\x89' + urmd) #ZEN (uncompressed)
zeitcaddr = bit.base58.b58encode_check(b'\x33' + crmd) #ZEIT (compressed)
        zeituaddr = bit.base58.b58encode_check(b'\x33' + urmd) #ZEIT (uncompressed)
eaddr = ETH_Address(upub)
count+=1*int(threadCount)
totalkey+=3*int(threadCount)
total+=28*int(threadCount)
if caddr in add or uaddr in add or paddr in add or bcaddr in add or lcaddr in add or luaddr in add or lpaddr in add or lbpaddr in add or btgcaddr in add or btguaddr in add or btgpaddr in add or btgbpaddr in add or btczcaddr in add or btczuaddr in add or tentcaddr in add or tentuaddr in add or dogecaddr in add or dogeuaddr in add or dashcaddr in add or dashuaddr in add or smartcaddr in add or smartuaddr in add or zcashcaddr in add or zcashuaddr in add or zencaddr in add or zenuaddr in add or zeitcaddr in add or zeituaddr in add or eaddr in add or caddr160 in add or uaddr160 in add or caddrpub in add or uaddrpub in add or crmd in add or urmd in add:
print("Matching Key ==== Found!!!\n PrivateKey: " + privatekey +'\n')
print("\nboomcpu.py------------mizogg.co.uk------------boomcpu.py\n" )
print('Total Addresses\HASH160\PUBLICKEYS Loaded and Checking : ', line_count)
print('=========================== Address ==================================')
print( 'Bitcoin BTC (compressed) : ' + caddr)
print( 'Bitcoin BTC (uncompressed) : ' + uaddr)
print( 'Bitcoin BTC (P2SH) : ' + paddr)
print( 'Bitcoin BTC (bech32 p2wpkh) : ' + bcaddr)
print( 'Litecoin LTC (compressed) : ' + lcaddr)
print( 'Litecoin LTC (uncompressed) : ' + luaddr)
print( 'Litecoin LTC (P2SH) : ' + lpaddr)
print( 'Litecoin LTC (bech32 p2wpkh) : ' + lbpaddr)
print( 'Bitcoin GOLD BTG (compressed) : ' + btgcaddr)
print( 'Bitcoin GOLD BTG (uncompressed) : ' + btguaddr)
print( 'Bitcoin GOLD BTG (P2SH) : ' + btgpaddr)
print( 'Bitcoin GOLD BTG (bech32 p2wpkh) : ' + btgbpaddr)
print( 'BITCOINZ BTCZ (compressed) : ' + btczcaddr)
print( 'BITCOINZ BTCZ (uncompressed) : ' + btczuaddr)
print( 'TENT coin TENT (compressed) : ' + tentcaddr)
print( 'TENT coin TENT (uncompressed) : ' + tentuaddr)
print( 'Doge Coin DOGE (compressed) : ' + dogecaddr)
print( 'Doge Coin DOGE (uncompressed) : ' + dogeuaddr)
print( 'Dash Coin DASH (compressed) : ' + dashcaddr)
print( 'Dash Coin DASH (uncompressed) : ' + dashuaddr)
print( 'SmartCoin SMC (compressed) : ' + smartcaddr)
print( 'SmartCoin SMC (uncompressed) : ' + smartuaddr)
print( 'Zcash ZCASH (compressed) : ' + zcashcaddr)
print( 'Zcash ZCASH (uncompressed) : ' + zcashuaddr)
print( 'Horizen ZEN (compressed) : ' + zencaddr)
print( 'Horizen ZEN (uncompressed) : ' + zenuaddr)
print( 'Zeitcoin ZEIT (compressed) : ' + zeitcaddr)
print( 'Zeitcoin ZEIT (uncompressed) : ' + zeituaddr)
print( 'Ethereum ETH : ' + eaddr)
print('=========================== Hash 160 =================================')
print( 'compressed Hash 160 : ' , crmd.hex())
print( 'uncompressed Hash 160 : ' , urmd.hex())
print('=========================== Private Keys =============================')
print('BTC Private Key (compressed WIF) : ' +cwif)
print('BTC Private Key(Uncompressed WIF) : ' + uwif)
print('Private key HEX : ' + privatekey)
print('=========================== Public Keys ==============================')
print('compressed Public Key : ' , cpub.hex())
print('uncompressed Public Key : ' , upub.hex())
f=open(u"winner.txt","a")
f.write('\nboomcpu.py made by mizogg.co.uk donations 3M6L77jC3jNejsd5ZU1CVpUVngrhanb6cD')
f.write('\n')
f.write('\nBitcoin BTC (compressed) :' + caddr)
f.write('\nBitcoin BTC (uncompressed) :' + uaddr)
f.write('\nBitcoin BTC (P2SH) :' + paddr)
f.write('\nBitcoin BTC (bech32 p2wpkh) :' + bcaddr)
f.write('\nLitecoin LTC (compressed) :' + lcaddr)
f.write('\nLitecoin LTC (uncompressed) :' + luaddr)
f.write('\nLitecoin LTC (P2SH) :' + lpaddr)
f.write('\nLitecoin LTC (bech32 p2wpkh) :' + lbpaddr)
f.write('\nBitcoin GOLD BTG (compressed) :' + btgcaddr)
f.write('\nBitcoin GOLD BTG (uncompressed) :' + btguaddr)
f.write('\nBitcoin GOLD BTG (P2SH) :' + btgpaddr)
f.write('\nBitcoin GOLD BTG (bech32 p2wpkh) :' + btgbpaddr)
f.write('\nBITCOINZ BTCZ (compressed) :' + btczcaddr)
f.write('\nBITCOINZ BTCZ (uncompressed) :' + btczuaddr)
f.write('\nTENT coin TENT (compressed) :' + tentcaddr)
f.write('\nTENT coin TENT (uncompressed) :' + tentuaddr)
f.write('\nDoge Coin DOGE (compressed) :' + dogecaddr)
f.write('\nDoge Coin DOGE (uncompressed) :' + dogeuaddr)
f.write('\nDash Coin DASH (compressed) :' + dashcaddr)
f.write('\nDash Coin DASH (uncompressed) :' + dashuaddr)
f.write('\nSmartCoin SMC (compressed) :' + smartcaddr)
f.write('\nSmartCoin SMC (uncompressed) :' + smartuaddr)
f.write('\nZcash ZCASH (compressed) :' + zcashcaddr)
f.write('\nZcash ZCASH (uncompressed) :' + zcashuaddr)
f.write('\nHorizen ZEN (compressed) :' + zencaddr)
f.write('\nHorizen ZEN (uncompressed) :' + zenuaddr)
f.write('\nZeitcoin ZEIT (compressed) :' + zeitcaddr)
f.write('\nZeitcoin ZEIT (uncompressed) :' + zeituaddr)
f.write('\nEthereum ETH :' + eaddr)
f.write('\n=========================== Hash 160 =================================')
f.write('\ncompressed Hash 160 : '+ caddr160)
f.write('\nuncompressed Hash 160 : '+ uaddr160)
f.write('\n=========================== Private Keys =============================')
f.write('\nBTC Private Key (compressed WIF) : ' + cwif)
f.write('\nBTC Private Key(Uncompressed WIF) : ' + uwif)
f.write('\nPrivate key HEX : ' + privatekey)
f.write('\n=========================== Public Keys ==============================')
f.write('\ncompressed Public Key : ' + caddrpub)
f.write('\nuncompressed Public Key : ' + uaddrpub)
            f.write('\n')
            f.close()
else:
print("\nboomcpu.py---" + "---------mizogg.co.uk---------" + "---boomcpu.py" )
print('Total Addresses\HASH160\PUBLICKEYS Loaded and Checking : ', line_count)
print('=========================== Address ==================================')
print( 'Bitcoin BTC (compressed) : ' + caddr)
print( 'Bitcoin BTC (uncompressed) : ' + uaddr)
print( 'Bitcoin BTC (P2SH) : ' + paddr)
print( 'Bitcoin BTC (bech32 p2wpkh) : ' + bcaddr)
print( 'Litecoin LTC (compressed) : ' + lcaddr)
print( 'Litecoin LTC (uncompressed) : ' + luaddr)
print( 'Litecoin LTC (P2SH) : ' + lpaddr)
print( 'Litecoin LTC (bech32 p2wpkh) : ' + lbpaddr)
print( 'Bitcoin GOLD BTG (compressed) : ' + btgcaddr)
print( 'Bitcoin GOLD BTG (uncompressed) : ' + btguaddr)
print( 'Bitcoin GOLD BTG (P2SH) : ' + btgpaddr)
print( 'Bitcoin GOLD BTG (bech32 p2wpkh) : ' + btgbpaddr)
print( 'BITCOINZ BTCZ (compressed) : ' + btczcaddr)
print( 'BITCOINZ BTCZ (uncompressed) : ' + btczuaddr)
print( 'TENT coin TENT (compressed) : ' + tentcaddr)
print( 'TENT coin TENT (uncompressed) : ' + tentuaddr)
print( 'Doge Coin DOGE (compressed) : ' + dogecaddr)
print( 'Doge Coin DOGE (uncompressed) : ' + dogeuaddr)
print( 'Dash Coin DASH (compressed) : ' + dashcaddr)
print( 'Dash Coin DASH (uncompressed) : ' + dashuaddr)
print( 'SmartCoin SMC (compressed) : ' + smartcaddr)
print( 'SmartCoin SMC (uncompressed) : ' + smartuaddr)
print( 'Zcash ZCASH (compressed) : ' + zcashcaddr)
print( 'Zcash ZCASH (uncompressed) : ' + zcashuaddr)
print( 'Horizen ZEN (compressed) : ' + zencaddr)
print( 'Horizen ZEN (uncompressed) : ' + zenuaddr)
print( 'Zeitcoin ZEIT (compressed) : ' + zeitcaddr)
print( 'Zeitcoin ZEIT (uncompressed) : ' + zeituaddr)
print( 'Ethereum ETH : ' + eaddr)
print('=========================== Hash 160 =================================')
print( 'compressed Hash 160 : ' , crmd.hex())
print( 'uncompressed Hash 160 : ' , urmd.hex())
print('=========================== Private Keys =============================')
print('BTC Private Key (compressed WIF) : ' +cwif)
print('BTC Private Key(Uncompressed WIF) : ' + uwif)
print('Private key HEX : ' + privatekey)
print('=========================== Public Keys ==============================')
print('compressed Public Key : ' , cpub.hex())
print('uncompressed Public Key : ' , upub.hex())
#print ( 'Running Scan : ' + str (count) + ' : ' + 'Total Private Keys : ' + str(totalkey) + ' : ' + 'Total Addresses : ' + str (total) + ' : ' + seconds_to_str(), end='\r' )
threads = []
for i in range(int(threadCount)):
t = threading.Thread(target=seek)
threads.append(t)
t.start()
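# Added sketch (not part of the original script): explicitly wait for the
# worker threads started above. They are non-daemon threads, so the process
# would stay alive either way; the join only makes that intent explicit.
for t in threads:
    t.join()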
|
spider.py
|
from . import browser
from . import crawler
import re
from colorama import Fore, Style
import queue
from threading import Thread
import logging
B_BLUE = Style.BRIGHT+Fore.BLUE
B_WHITE = Style.BRIGHT+Fore.WHITE
B_RED = Style.BRIGHT+Fore.RED
B_CYAN = Style.BRIGHT+Fore.CYAN
RESET = Style.RESET_ALL
GREEN = Fore.GREEN
YELLOW = Fore.YELLOW
class Spider(object):
def __init__(self, url, level=None, cookie=None, fast=None):
self.logger = logging.getLogger("Spider")
self.logger.setLevel(logging.DEBUG)
handler = logging.FileHandler("Spaydi.log")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.level = level if level is not None and level in [1,2,3,4,5] else 3
self.fast_mode = fast if fast is not None else False
self.cookie = cookie
self.target_url = url
self.visited_urns = []
self.visited_urls = []
self.output_forms = []
self.visit_urls = []
self.printed_action = []
self.target_domain = self.get_domain(self.target_url)
self.browser = browser.Browser()
self.crawler = crawler.Crawler()
def get_domain(self, url):
_domain = re.findall(r'https?://(.*?)\/', url)
_domain2 = re.findall(r'https?://(.*?)$', url)
r_domain = _domain[0] if _domain else _domain2[0]
return r_domain
def set_link(self, link):
if "javascript:" in link:
return False
        if link.startswith("http"):  # covers both http:// and https://
_domain = self.get_domain(link)
if _domain == self.target_domain:
return link
else:
return False
else:
link = "http://{}/{}".format(self.target_domain, link)
return link
def print_forms(self, forms):
_form = ""
print("{1}Page Title:{2} {3}{0}{2}".format(self.browser.page_title,
B_BLUE, RESET, GREEN))
print("{1}Set-Cookie:{2} {3}{0}{2}".format(self.browser.set_cookie,
B_BLUE, RESET, GREEN))
for i in forms:
if i['form_action'] in self.printed_action:
continue
print("-"*30+"<FORM>"+"-"*30)
print("{1}Page URL :{2} {3}{0}{2}".format(i['url'], B_BLUE, RESET,
GREEN))
print("{1}ACTION:{2} {3}{0}{2}".format(i['form_action'].lower(),
B_BLUE, RESET, GREEN))
print("{1}METHOD:{2} {3}{0}{2}".format(i['form_method'].upper(),
B_BLUE, RESET, GREEN))
_form += "Page Title: {}\n".format(self.browser.page_title)
_form += "Set-Cookie: {}\n".format(self.browser.set_cookie)
_form += "Page Url: {}\n".format(i['url'])
_form += "ACTION: {}\n".format(i['form_action'].lower())
_form += "METHOD: {}\n".format(i['form_method'].upper())
            for field in i['inputs']:
                _form += "[input] "
                _text = "{0}[input]{1}{2} ".format(B_BLUE, RESET, GREEN)
                _name = field['name']
                _type = field['type']
                _value = field['value']
                _placeholder = field['placeholder']
                if _name != "":
                    _text += "{1}name{2}={3}'{0}'{2}, ".format(_name, YELLOW, RESET, GREEN)
                    _form += "name='{}', ".format(_name)
                if _type != "":
                    _text += "{1}type{2}={3}'{0}'{2}, ".format(_type, YELLOW, RESET, GREEN)
                    _form += "type='{}', ".format(_type)
                if _value != "":
                    _text += "{1}value{2}={3}'{0}'{2}, ".format(_value, YELLOW, RESET, GREEN)
                    _form += "value='{}', ".format(_value)
                if _placeholder != "":
                    _text += "{1}placeholder{2}={3}'{0}'{2}".format(_placeholder, YELLOW, RESET, GREEN)
                    _form += "placeholder='{}', ".format(_placeholder)
                _form += "\n"
                print(_text + RESET)
            self.printed_action.append(i['form_action'])
            self.output_forms.append(_form)
            print("-" * 30 + "</FORM>" + "-" * 30)
    def clean_link(self, link):
        # Strip any URL fragment (everything from '#' onward).
        return link.split("#", 1)[0]
def just_urn(self, link):
if link.startswith("https://"):
link = link[8::]
return link
link = link[7::]
return link
def loop(self):
# self.logger.info("loop() started.")
_url_list = []
for link in self.visit_urls:
link = self.clean_link(link)
if self.just_urn(link) in self.visited_urns:
continue
stat = self.browser.get(url=link, cookie=self.cookie)
print("{1}{0}{2}".format("--"*40, B_RED, RESET))
print("{1}[GET]{2} {0}".format(link, B_CYAN, RESET))
self.visited_urns.append(self.just_urn(link))
self.visited_urls.append(link)
if stat in [200, 302, 301]:
forms = self.crawler.get_forms(self.browser.page_source, link)
self.print_forms(forms)
links = self.crawler.get_urls(self.browser.page_source)
for i in links:
_link = self.set_link(i)
if _link and self.just_urn(_link) not in self.visited_urns:
_url_list.append(_link)
del self.visit_urls[:]
_url_list = list(set(_url_list))
self.visit_urls = _url_list[:]
del _url_list
def t_process(self, link):
# self.logger.info("t_process() started.")
_url_list = []
stat = self.browser.get(url=link, cookie=self.cookie)
print("{1}{0}{2}".format("--"*40, B_RED, RESET))
print("{1}[GET]{2} {0} {3}".format(link, B_CYAN, RESET, stat))
self.visited_urns.append(self.just_urn(link))
self.visited_urls.append(link)
if stat in [200, 302, 301]:
forms = self.crawler.get_forms(self.browser.page_source, link)
self.print_forms(forms)
links = self.crawler.get_urls(self.browser.page_source)
for i in links:
_link = self.set_link(i)
if _link and self.just_urn(_link) not in self.visited_urns:
_url_list.append(_link)
return _url_list
def t_loop(self):
# self.logger.info("t_loop() started.")
_url_list = []
que = queue.Queue()
thread_list = []
th_count = 0
        max_threads = 5
l_visit_urls = len(self.visit_urls)
for link in self.visit_urls:
link = self.clean_link(link)
if self.just_urn(link) in self.visited_urns:
l_visit_urls -= 1
continue
t = Thread(target=lambda q, arg1: q.put(self.t_process(arg1)), \
args=(que, link))
t.start()
thread_list.append(t)
th_count += 1
l_visit_urls -= 1
self.logger.info("Started Thread-{}".format(th_count))
            if th_count % max_threads == 0:
self.logger.info("Waiting for Thread Process...")
for t in thread_list:
t.join()
self.logger.info("Thread Process Finished..")
elif l_visit_urls < 5:
self.logger.info("Waiting for Thread Process...")
for t in thread_list:
t.join()
self.logger.info("Thread Process Finished..")
while not que.empty():
_url_list.extend(que.get())
del self.visit_urls[:]
_url_list = list(set(_url_list))
self.visit_urls = _url_list[:]
del _url_list
def go(self):
self.logger.info("go() started.")
# level 1
stat = self.browser.get(self.target_url, cookie=self.cookie)
print("{1}{0}{2}".format("--"*40, B_RED, RESET))
if stat in [200, 302, 301]:
forms = self.crawler.get_forms(self.browser.page_source, self.target_url)
self.print_forms(forms)
links = self.crawler.get_urls(self.browser.page_source)
for i in links:
_link = self.set_link(i)
if _link:
self.visit_urls.append(_link)
self.visit_urls = list(set(self.visit_urls))
self.logger.info("t_loop() started with Fast mode")
if self.fast_mode:
for i in range(self.level-1):
self.logger.info("t_loop() started with Fast mode")
self.t_loop()
else:
for i in range(self.level-1):
self.logger.info("t_loop() started with Normal mode")
self.loop()
else:
print(stat)
return self.visited_urls, self.output_forms
if __name__ == '__main__':
pass
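    # Hypothetical usage sketch, kept as comments because this module uses
    # relative imports (from . import browser / crawler) and is meant to be
    # driven by a parent package; the URL below is a placeholder:
    #
    #     spider = Spider("http://example.com/", level=2, fast=True)
    #     visited_urls, forms = spider.go()
    #     print("Crawled {} URLs, captured {} forms".format(len(visited_urls), len(forms)))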
|
viz.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import time
import logging
import omegaconf
import hydra
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly
import plotly.subplots
import queue
import threading
import collections
# include Python files generated by CMake
package_name = "polymetis"
current_dir = os.path.dirname(os.path.realpath(__file__))
catkin_build_path = os.path.abspath(
os.path.join(current_dir, f"../../../build/{package_name}")
)
sys.path.append(catkin_build_path)
from google.protobuf.internal.encoder import _VarintBytes
from google.protobuf.internal.decoder import _DecodeVarint32
import grpc
import polymetis_pb2
import polymetis_pb2_grpc
def write_protobuf(file, message):
size = message.ByteSize()
file.write(_VarintBytes(size))
file.write(message.SerializeToString())
return file
def read_protobuf(file, protobuf_message_class):
    if isinstance(file, str):
        file = open(file, "rb")
buf = file.read()
pos = 0
while pos < len(buf):
msg_len, pos = _DecodeVarint32(buf, pos)
msg_buf = buf[pos : pos + msg_len]
pos += msg_len
message = protobuf_message_class()
message.ParseFromString(msg_buf)
yield message
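# Round-trip sketch for the varint length-delimited log format implemented by
# write_protobuf()/read_protobuf() above. Kept as comments so importing this
# module stays side-effect free; "example_log.bin" is a placeholder path:
#
#     with open("example_log.bin", "wb") as f:
#         write_protobuf(f, polymetis_pb2.RobotState())
#     for msg in read_protobuf("example_log.bin", polymetis_pb2.RobotState):
#         print(msg.timestamp)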
class RobotStateVisualizer:
def __init__(
self,
server_ip="localhost",
server_port=50051,
max_queue_size=10000,
downsampling_ratio=100,
log_keys={"joint_positions", "joint_velocities", "joint_torques_computed"},
logfile="",
):
self.log_keys = log_keys
if logfile:
self.stream_live_data = False
print(f"Reading data from {logfile}")
self.state_queue = queue.Queue()
for i, robot_state in enumerate(
read_protobuf(logfile, polymetis_pb2.RobotState)
):
self.state_queue.put((i, robot_state))
else:
server_connection = f"{server_ip}:{server_port}"
print(f"Streaming data from server {server_connection}")
self.stream_live_data = True
# Setup logfile to write robot_states
logfile_path = os.path.join(os.getcwd(), "logfile.bin")
self.logfile = open(logfile_path, "wb")
print(f"Saving logfile to {logfile_path}")
# Set up connection
self.channel = grpc.insecure_channel(server_connection)
self.grpc_connection = polymetis_pb2_grpc.PolymetisControllerServerStub(
self.channel
)
# Connect to RPC
self.stream = self.grpc_connection.GetRobotStateStream(
polymetis_pb2.Empty()
)
# Concurrently read from stream in a different thread
self.state_queue = queue.Queue(maxsize=max_queue_size)
self.streaming_thread = threading.Thread(
target=self.update, args=(), daemon=True
)
self.streaming_thread.start()
self.step = 0
self.downsampling_ratio = downsampling_ratio
def __del__(self):
if self.stream_live_data:
self.channel.close()
self.logfile.close()
def update(self):
if self.stream_live_data:
for robot_state in self.stream:
if self.step % self.downsampling_ratio == 0:
self.state_queue.put((self.step, robot_state))
self.step += 1
def process_queue(self):
dataframes = {}
while not self.state_queue.empty():
step, robot_state = self.state_queue.get()
if self.stream_live_data:
write_protobuf(self.logfile, robot_state)
curr_datetime = robot_state.timestamp.ToDatetime()
for field, values in robot_state.ListFields():
if field.name in self.log_keys:
try:
values_dim = len(values)
except TypeError:
values_dim = 1
if field.name not in dataframes:
df = pd.DataFrame(
columns=["datetime"] + list(range(values_dim))
).set_index("datetime")
dataframes[field.name] = df
df = dataframes[field.name]
                    try:
                        value_dict = dict(enumerate(values))
                    except TypeError:
                        # Non-repeated (scalar) field: store it in a single column.
                        value_dict = {0: values}
df.loc[curr_datetime] = value_dict
return dataframes
def initialize_graphs(viz, height=1000):
while viz.state_queue.empty():
print("Waiting for states...")
time.sleep(1)
dataframes = viz.process_queue()
num_dataframes = len(dataframes)
max_columns = max([len(df.columns) for df in dataframes.values()])
# Generate subplot titles
subplot_titles = []
row = 1
for df_name, df in dataframes.items():
col = 1
for column in df.columns:
subplot_titles.append(f"{df_name}: {column}")
col += 1
for i in range(col, max_columns + 1):
subplot_titles.append("")
row += 1
fig = plotly.subplots.make_subplots(
rows=num_dataframes, cols=max_columns, subplot_titles=subplot_titles
)
fig.update_layout(height=height, showlegend=False)
# Add graph objects
row = 1
for df_name, df in dataframes.items():
datetimes = [x.strftime("%H:%M:%S") for x in df.index]
col = 1
for column in df.columns:
fig.append_trace(
plotly.graph_objects.Scatter(
x=datetimes, y=df[column], line=dict(color="black")
),
row=row,
col=col,
)
col += 1
row += 1
return fig
@hydra.main(config_path="../polymetis/conf/", config_name="viz")
def main(cfg):
print(f"Config:\n{omegaconf.OmegaConf.to_yaml(cfg)}")
# Connect to server...
visualizer = RobotStateVisualizer(
server_ip=cfg.server_ip,
server_port=cfg.server_port,
max_queue_size=cfg.max_queue_size,
downsampling_ratio=cfg.downsampling_ratio,
log_keys=set(cfg.log_keys),
logfile=cfg.logfile,
)
# Create initial figure
fig = initialize_graphs(visualizer)
# Create app
style = omegaconf.OmegaConf.to_container(cfg.app.external_stylesheets, resolve=True)
app = dash.Dash("controller_manager_visualizer", external_stylesheets=style)
# Define app layout
app.layout = html.Div(
children=[
html.H1(children="Controller manager visualization"),
dcc.Graph(id="live-update-graph", figure=fig),
dcc.Interval(
id="interval-component",
interval=cfg.update_interval_seconds * 1000,
n_intervals=0,
),
]
)
@app.callback(
Output("live-update-graph", "extendData"),
[Input("interval-component", "n_intervals")],
)
def update_graph_data(n_intervals):
dataframes = visualizer.process_queue()
updated_data = {"x": [], "y": []}
num_traces = 0
# Add graph objects
for df_name, df in dataframes.items():
datetimes = [x.strftime("%H:%M:%S") for x in df.index]
for column in df.columns:
num_traces += 1
updated_data["x"].append(datetimes)
updated_data["y"].append(df[column])
trace_indices = list(range(num_traces))
return [updated_data, trace_indices]
app.logger.setLevel(logging.WARNING)
app.run_server(debug=cfg.app.debug, host=cfg.app.host, port=cfg.app.port)
if __name__ == "__main__":
main()
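# Invocation sketch (assumes the hydra "viz" config referenced in @hydra.main
# above; the values shown are placeholders): any config field can be
# overridden on the command line in hydra's key=value style, e.g.
#     python viz.py server_ip=localhost server_port=50051
#     python viz.py logfile=logfile.bin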
|